Nov 21 19:01:48 crc systemd[1]: Starting Kubernetes Kubelet... Nov 21 19:01:48 crc restorecon[4678]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized
by admin to system_u:object_r:container_file_t:s0:c225,c458 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c4,c24 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c574,c582 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c138,c778 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 
Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c84,c419 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c108,c511 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c442,c857 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c12,c18 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c0,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 21 19:01:48 crc 
restorecon[4678]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 21 19:01:48 crc 
restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc 
restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc 
restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 21 19:01:48 
crc restorecon[4678]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 
19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 
19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 19:01:48 crc 
restorecon[4678]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 
19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 
19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc 
restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:48 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 19:01:49 crc restorecon[4678]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 21 19:01:49 crc restorecon[4678]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 21 19:01:49 crc restorecon[4678]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Nov 21 19:01:49 crc kubenswrapper[4701]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 21 19:01:49 crc kubenswrapper[4701]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Nov 21 19:01:49 crc kubenswrapper[4701]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 21 19:01:49 crc kubenswrapper[4701]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
Nov 21 19:01:49 crc kubenswrapper[4701]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI. Nov 21 19:01:49 crc kubenswrapper[4701]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.678904 4701 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime" Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.683856 4701 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.683886 4701 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.683897 4701 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.683905 4701 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.683914 4701 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.683928 4701 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.683936 4701 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.683944 4701 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.683952 4701 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.683959 4701 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.683969 4701 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.683980 4701 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.683988 4701 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.683996 4701 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684004 4701 feature_gate.go:330] unrecognized feature gate: Example Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684012 4701 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684020 4701 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684028 4701 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684038 4701 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684047 4701 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684056 4701 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684066 4701 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684075 4701 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684084 4701 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684093 4701 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684100 4701 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684108 4701 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684116 4701 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684124 4701 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684132 4701 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684140 4701 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684147 4701 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684155 4701 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684185 4701 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684193 4701 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684224 4701 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684233 4701 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684241 4701 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684248 4701 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684256 4701 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684264 4701 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684272 4701 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684299 4701 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684309 4701 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684317 4701 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684326 4701 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684334 4701 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684342 4701 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684350 4701 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684359 4701 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684367 4701 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684376 4701 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684384 4701 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684391 4701 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684399 4701 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684409 4701 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684418 4701 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684425 4701 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684433 4701 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684441 4701 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684449 4701 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684457 4701 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684464 4701 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684472 4701 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684479 4701 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684487 4701 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684495 4701 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684504 4701 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684511 4701 feature_gate.go:330] unrecognized feature gate: 
ClusterAPIInstall Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684519 4701 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.684527 4701 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.684663 4701 flags.go:64] FLAG: --address="0.0.0.0" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.684679 4701 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.684693 4701 flags.go:64] FLAG: --anonymous-auth="true" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.684704 4701 flags.go:64] FLAG: --application-metrics-count-limit="100" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.684716 4701 flags.go:64] FLAG: --authentication-token-webhook="false" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.684725 4701 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.684736 4701 flags.go:64] FLAG: --authorization-mode="AlwaysAllow" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.684752 4701 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.684761 4701 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.684770 4701 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.684780 4701 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.684789 4701 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.684799 4701 flags.go:64] FLAG: --cgroup-driver="cgroupfs" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.684808 4701 flags.go:64] FLAG: --cgroup-root="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.684816 4701 flags.go:64] FLAG: --cgroups-per-qos="true" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.684826 4701 flags.go:64] FLAG: --client-ca-file="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.684835 4701 flags.go:64] FLAG: --cloud-config="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.684844 4701 flags.go:64] FLAG: --cloud-provider="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.684852 4701 flags.go:64] FLAG: --cluster-dns="[]" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.684864 4701 flags.go:64] FLAG: --cluster-domain="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.684873 4701 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.684883 4701 flags.go:64] FLAG: --config-dir="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.684891 4701 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.684901 4701 flags.go:64] FLAG: --container-log-max-files="5" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.684913 4701 flags.go:64] FLAG: --container-log-max-size="10Mi" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.684922 4701 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.684932 4701 flags.go:64] FLAG: 
--containerd="/run/containerd/containerd.sock" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.684942 4701 flags.go:64] FLAG: --containerd-namespace="k8s.io" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.684950 4701 flags.go:64] FLAG: --contention-profiling="false" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.684959 4701 flags.go:64] FLAG: --cpu-cfs-quota="true" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.684968 4701 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.684977 4701 flags.go:64] FLAG: --cpu-manager-policy="none" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.684988 4701 flags.go:64] FLAG: --cpu-manager-policy-options="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.684999 4701 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.685007 4701 flags.go:64] FLAG: --enable-controller-attach-detach="true" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.685016 4701 flags.go:64] FLAG: --enable-debugging-handlers="true" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.685026 4701 flags.go:64] FLAG: --enable-load-reader="false" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.685034 4701 flags.go:64] FLAG: --enable-server="true" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.685043 4701 flags.go:64] FLAG: --enforce-node-allocatable="[pods]" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.685054 4701 flags.go:64] FLAG: --event-burst="100" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.685062 4701 flags.go:64] FLAG: --event-qps="50" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.685071 4701 flags.go:64] FLAG: --event-storage-age-limit="default=0" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.685080 4701 flags.go:64] FLAG: --event-storage-event-limit="default=0" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.685089 4701 flags.go:64] FLAG: --eviction-hard="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.685099 4701 flags.go:64] FLAG: --eviction-max-pod-grace-period="0" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.685108 4701 flags.go:64] FLAG: --eviction-minimum-reclaim="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.685117 4701 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.685126 4701 flags.go:64] FLAG: --eviction-soft="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.685135 4701 flags.go:64] FLAG: --eviction-soft-grace-period="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.685144 4701 flags.go:64] FLAG: --exit-on-lock-contention="false" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.685153 4701 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.685162 4701 flags.go:64] FLAG: --experimental-mounter-path="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.685171 4701 flags.go:64] FLAG: --fail-cgroupv1="false" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.685180 4701 flags.go:64] FLAG: --fail-swap-on="true" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.685189 4701 flags.go:64] FLAG: --feature-gates="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.685227 4701 flags.go:64] FLAG: --file-check-frequency="20s" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686130 4701 flags.go:64] FLAG: 
--global-housekeeping-interval="1m0s" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686145 4701 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686154 4701 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686164 4701 flags.go:64] FLAG: --healthz-port="10248" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686174 4701 flags.go:64] FLAG: --help="false" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686182 4701 flags.go:64] FLAG: --hostname-override="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686191 4701 flags.go:64] FLAG: --housekeeping-interval="10s" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686229 4701 flags.go:64] FLAG: --http-check-frequency="20s" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686239 4701 flags.go:64] FLAG: --image-credential-provider-bin-dir="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686248 4701 flags.go:64] FLAG: --image-credential-provider-config="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686256 4701 flags.go:64] FLAG: --image-gc-high-threshold="85" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686267 4701 flags.go:64] FLAG: --image-gc-low-threshold="80" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686278 4701 flags.go:64] FLAG: --image-service-endpoint="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686287 4701 flags.go:64] FLAG: --kernel-memcg-notification="false" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686296 4701 flags.go:64] FLAG: --kube-api-burst="100" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686305 4701 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686315 4701 flags.go:64] FLAG: --kube-api-qps="50" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686324 4701 flags.go:64] FLAG: --kube-reserved="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686333 4701 flags.go:64] FLAG: --kube-reserved-cgroup="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686342 4701 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686351 4701 flags.go:64] FLAG: --kubelet-cgroups="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686360 4701 flags.go:64] FLAG: --local-storage-capacity-isolation="true" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686369 4701 flags.go:64] FLAG: --lock-file="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686378 4701 flags.go:64] FLAG: --log-cadvisor-usage="false" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686388 4701 flags.go:64] FLAG: --log-flush-frequency="5s" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686398 4701 flags.go:64] FLAG: --log-json-info-buffer-size="0" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686413 4701 flags.go:64] FLAG: --log-json-split-stream="false" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686422 4701 flags.go:64] FLAG: --log-text-info-buffer-size="0" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686435 4701 flags.go:64] FLAG: --log-text-split-stream="false" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686444 4701 flags.go:64] FLAG: --logging-format="text" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686453 4701 flags.go:64] FLAG: 
--machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686463 4701 flags.go:64] FLAG: --make-iptables-util-chains="true" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686472 4701 flags.go:64] FLAG: --manifest-url="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686481 4701 flags.go:64] FLAG: --manifest-url-header="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686492 4701 flags.go:64] FLAG: --max-housekeeping-interval="15s" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686501 4701 flags.go:64] FLAG: --max-open-files="1000000" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686512 4701 flags.go:64] FLAG: --max-pods="110" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686522 4701 flags.go:64] FLAG: --maximum-dead-containers="-1" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686531 4701 flags.go:64] FLAG: --maximum-dead-containers-per-container="1" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686539 4701 flags.go:64] FLAG: --memory-manager-policy="None" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686548 4701 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686557 4701 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686566 4701 flags.go:64] FLAG: --node-ip="192.168.126.11" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686575 4701 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686594 4701 flags.go:64] FLAG: --node-status-max-images="50" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686602 4701 flags.go:64] FLAG: --node-status-update-frequency="10s" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686611 4701 flags.go:64] FLAG: --oom-score-adj="-999" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686620 4701 flags.go:64] FLAG: --pod-cidr="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686630 4701 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686643 4701 flags.go:64] FLAG: --pod-manifest-path="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686652 4701 flags.go:64] FLAG: --pod-max-pids="-1" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686661 4701 flags.go:64] FLAG: --pods-per-core="0" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686670 4701 flags.go:64] FLAG: --port="10250" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686679 4701 flags.go:64] FLAG: --protect-kernel-defaults="false" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686688 4701 flags.go:64] FLAG: --provider-id="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686696 4701 flags.go:64] FLAG: --qos-reserved="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686705 4701 flags.go:64] FLAG: --read-only-port="10255" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686714 4701 flags.go:64] FLAG: --register-node="true" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686725 4701 flags.go:64] FLAG: --register-schedulable="true" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686735 4701 flags.go:64] FLAG: 
--register-with-taints="node-role.kubernetes.io/master=:NoSchedule" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686754 4701 flags.go:64] FLAG: --registry-burst="10" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686763 4701 flags.go:64] FLAG: --registry-qps="5" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686772 4701 flags.go:64] FLAG: --reserved-cpus="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686781 4701 flags.go:64] FLAG: --reserved-memory="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686791 4701 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686800 4701 flags.go:64] FLAG: --root-dir="/var/lib/kubelet" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686809 4701 flags.go:64] FLAG: --rotate-certificates="false" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686818 4701 flags.go:64] FLAG: --rotate-server-certificates="false" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686828 4701 flags.go:64] FLAG: --runonce="false" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686836 4701 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686846 4701 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686855 4701 flags.go:64] FLAG: --seccomp-default="false" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686863 4701 flags.go:64] FLAG: --serialize-image-pulls="true" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686872 4701 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686882 4701 flags.go:64] FLAG: --storage-driver-db="cadvisor" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686891 4701 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686900 4701 flags.go:64] FLAG: --storage-driver-password="root" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686908 4701 flags.go:64] FLAG: --storage-driver-secure="false" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686917 4701 flags.go:64] FLAG: --storage-driver-table="stats" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686926 4701 flags.go:64] FLAG: --storage-driver-user="root" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686935 4701 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686944 4701 flags.go:64] FLAG: --sync-frequency="1m0s" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686953 4701 flags.go:64] FLAG: --system-cgroups="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686962 4701 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686977 4701 flags.go:64] FLAG: --system-reserved-cgroup="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686986 4701 flags.go:64] FLAG: --tls-cert-file="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.686994 4701 flags.go:64] FLAG: --tls-cipher-suites="[]" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.687006 4701 flags.go:64] FLAG: --tls-min-version="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.687015 4701 flags.go:64] FLAG: --tls-private-key-file="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.687024 4701 flags.go:64] FLAG: 
--topology-manager-policy="none" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.687034 4701 flags.go:64] FLAG: --topology-manager-policy-options="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.687043 4701 flags.go:64] FLAG: --topology-manager-scope="container" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.687054 4701 flags.go:64] FLAG: --v="2" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.687066 4701 flags.go:64] FLAG: --version="false" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.687077 4701 flags.go:64] FLAG: --vmodule="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.687088 4701 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.687097 4701 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687316 4701 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687326 4701 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687335 4701 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687344 4701 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687353 4701 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687361 4701 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687370 4701 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687378 4701 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687387 4701 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687395 4701 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687403 4701 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687410 4701 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687418 4701 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687426 4701 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687434 4701 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687441 4701 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687449 4701 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687457 4701 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687465 4701 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687472 4701 
feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687480 4701 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687487 4701 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687495 4701 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687504 4701 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687513 4701 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687521 4701 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687529 4701 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687540 4701 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687547 4701 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687556 4701 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687563 4701 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687571 4701 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687579 4701 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687587 4701 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687595 4701 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687603 4701 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687610 4701 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687618 4701 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687625 4701 feature_gate.go:330] unrecognized feature gate: Example Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687633 4701 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687641 4701 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687648 4701 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687658 4701 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687669 4701 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687678 4701 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687689 4701 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687698 4701 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687706 4701 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687713 4701 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687721 4701 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687729 4701 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687736 4701 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687744 4701 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687751 4701 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687759 4701 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687767 4701 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687775 4701 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687783 4701 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687791 4701 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687802 4701 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687811 4701 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687818 4701 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687826 4701 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687833 4701 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687841 4701 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687849 4701 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687857 4701 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687867 4701 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687876 4701 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687885 4701 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.687894 4701 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.687918 4701 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.701285 4701 server.go:491] "Kubelet version" kubeletVersion="v1.31.5" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.701341 4701 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701480 4701 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701495 4701 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701505 4701 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701514 4701 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701525 4701 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701534 4701 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701542 4701 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701552 4701 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701560 4701 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701568 4701 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701577 4701 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701585 4701 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701595 4701 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701605 4701 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701617 4701 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701626 4701 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701634 4701 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701642 4701 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701650 4701 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701659 4701 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701666 4701 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701674 4701 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701682 4701 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701692 4701 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701702 4701 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701710 4701 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701719 4701 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701728 4701 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701737 4701 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701746 4701 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701754 4701 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701762 4701 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701770 4701 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701778 4701 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701786 4701 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701794 4701 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701801 4701 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701810 4701 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701818 4701 feature_gate.go:330] unrecognized feature gate: 
AzureWorkloadIdentity Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701825 4701 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701833 4701 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701841 4701 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701848 4701 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701856 4701 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701864 4701 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701872 4701 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701879 4701 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701889 4701 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701900 4701 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701909 4701 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701919 4701 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701928 4701 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701937 4701 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701946 4701 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701954 4701 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701962 4701 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701971 4701 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701979 4701 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701986 4701 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.701995 4701 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702002 4701 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702010 4701 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702017 4701 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702024 4701 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 21 
19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702032 4701 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702043 4701 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702052 4701 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702062 4701 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702071 4701 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702079 4701 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702088 4701 feature_gate.go:330] unrecognized feature gate: Example Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.702101 4701 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702350 4701 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702363 4701 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702373 4701 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702382 4701 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702390 4701 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702398 4701 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702406 4701 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702414 4701 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702422 4701 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702431 4701 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702439 4701 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702447 4701 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702454 4701 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702463 4701 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702471 
4701 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702480 4701 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702490 4701 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702498 4701 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702507 4701 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702514 4701 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702522 4701 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702530 4701 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702537 4701 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702545 4701 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702553 4701 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702560 4701 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702568 4701 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702575 4701 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702583 4701 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702591 4701 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702598 4701 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702606 4701 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702614 4701 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702622 4701 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702631 4701 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702686 4701 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702697 4701 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702707 4701 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702716 4701 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702726 4701 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. 
It will be removed in a future release. Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702736 4701 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702744 4701 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702752 4701 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702760 4701 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702769 4701 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702776 4701 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702784 4701 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702794 4701 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702804 4701 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702813 4701 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702824 4701 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702833 4701 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702841 4701 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702849 4701 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702858 4701 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702867 4701 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702874 4701 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702882 4701 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702890 4701 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702897 4701 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702906 4701 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702914 4701 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702921 4701 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702931 4701 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702940 4701 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702948 4701 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702955 4701 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702963 4701 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702971 4701 feature_gate.go:330] unrecognized feature gate: Example Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702981 4701 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.702991 4701 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.703002 4701 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.703291 4701 server.go:940] "Client rotation is on, will bootstrap in background" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.709768 4701 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.709908 4701 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem". 
Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.712348 4701 server.go:997] "Starting client certificate rotation" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.712400 4701 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.714314 4701 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-23 18:00:21.312651866 +0000 UTC Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.714420 4701 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 766h58m31.598237595s for next certificate rotation Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.738998 4701 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.741918 4701 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.764581 4701 log.go:25] "Validated CRI v1 runtime API" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.801118 4701 log.go:25] "Validated CRI v1 image API" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.803271 4701 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.810496 4701 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-21-18-56-40-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3] Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.810562 4701 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}] Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.841252 4701 manager.go:217] Machine: {Timestamp:2025-11-21 19:01:49.837303931 +0000 UTC m=+0.622443998 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2799998 MemoryCapacity:33654128640 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:5ab738c4-0d34-41bd-a531-77773953d838 BootID:3bda9678-f6a5-4de4-acaa-3527a0be80fa Filesystems:[{Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 
Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:f4:c7:7c Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:f4:c7:7c Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:78:e8:d7 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:ed:7f:6a Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:6f:cb:63 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:39:68:62 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:2a:96:fc:79:e3:a9 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:32:88:14:61:a8:8c Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654128640 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: 
DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None} Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.841608 4701 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available. Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.841822 4701 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:} Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.844127 4701 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.844422 4701 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.844487 4701 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.844782 4701 topology_manager.go:138] "Creating topology manager with none policy" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.844797 4701 container_manager_linux.go:303] "Creating device plugin manager" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.845781 4701 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.845814 4701 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock" Nov 21 
19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.847145 4701 state_mem.go:36] "Initialized new in-memory state store" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.847297 4701 server.go:1245] "Using root directory" path="/var/lib/kubelet" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.851767 4701 kubelet.go:418] "Attempting to sync node with API server" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.851795 4701 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.851823 4701 file.go:69] "Watching path" path="/etc/kubernetes/manifests" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.851843 4701 kubelet.go:324] "Adding apiserver pod source" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.851857 4701 apiserver.go:42] "Waiting for node sync before watching apiserver pods" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.858333 4701 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.864353 4701 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem". Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.871991 4701 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode" Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.872084 4701 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.12:6443: connect: connection refused Nov 21 19:01:49 crc kubenswrapper[4701]: E1121 19:01:49.872192 4701 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.12:6443: connect: connection refused" logger="UnhandledError" Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.872300 4701 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.12:6443: connect: connection refused Nov 21 19:01:49 crc kubenswrapper[4701]: E1121 19:01:49.872533 4701 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.12:6443: connect: connection refused" logger="UnhandledError" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.874029 4701 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.874072 4701 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.874088 4701 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.874101 4701 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 
19:01:49.874125 4701 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.874138 4701 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.874151 4701 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.874171 4701 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.874186 4701 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.874231 4701 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.874262 4701 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.874276 4701 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.876585 4701 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.877336 4701 server.go:1280] "Started kubelet" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.878713 4701 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.12:6443: connect: connection refused Nov 21 19:01:49 crc systemd[1]: Started Kubernetes Kubelet. Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.879576 4701 server.go:163] "Starting to listen" address="0.0.0.0" port=10250 Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.879570 4701 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.880376 4701 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.881634 4701 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.881696 4701 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.881717 4701 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-07 03:51:47.098061583 +0000 UTC Nov 21 19:01:49 crc kubenswrapper[4701]: E1121 19:01:49.881879 4701 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.881928 4701 volume_manager.go:287] "The desired_state_of_world populator starts" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.881944 4701 volume_manager.go:289] "Starting Kubelet Volume Manager" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.882063 4701 desired_state_of_world_populator.go:146] "Desired state populator starts to run" Nov 21 19:01:49 crc kubenswrapper[4701]: E1121 19:01:49.882567 4701 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial 
tcp 38.102.83.12:6443: connect: connection refused" interval="200ms" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.883274 4701 factory.go:55] Registering systemd factory Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.883307 4701 factory.go:221] Registration of the systemd container factory successfully Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.883565 4701 factory.go:153] Registering CRI-O factory Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.883585 4701 factory.go:221] Registration of the crio container factory successfully Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.883656 4701 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.883693 4701 factory.go:103] Registering Raw factory Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.883709 4701 manager.go:1196] Started watching for new ooms in manager Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.884544 4701 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.12:6443: connect: connection refused Nov 21 19:01:49 crc kubenswrapper[4701]: E1121 19:01:49.886063 4701 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.12:6443: connect: connection refused" logger="UnhandledError" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.886545 4701 server.go:460] "Adding debug handlers to kubelet server" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.887260 4701 manager.go:319] Starting recovery of all containers Nov 21 19:01:49 crc kubenswrapper[4701]: E1121 19:01:49.888419 4701 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.12:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187a1ad5db39479d default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-21 19:01:49.877290909 +0000 UTC m=+0.662430966,LastTimestamp:2025-11-21 19:01:49.877290909 +0000 UTC m=+0.662430966,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.905625 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.905733 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" 
volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.905749 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.905764 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.905818 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.905832 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.905845 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.905857 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.905874 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.905887 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.905899 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.905912 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.905926 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" 
volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.905942 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.905954 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.905968 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.905980 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.905992 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906028 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906040 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906052 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906065 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906079 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906093 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" 
volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906104 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906117 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906133 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906148 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906160 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906173 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906185 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906220 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906234 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906248 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906261 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" 
volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906276 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906292 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906308 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906321 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906335 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906346 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906360 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906372 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906384 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906397 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906409 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" 
volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906422 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906437 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906449 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906463 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906478 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906490 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906516 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906529 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906542 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906556 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906569 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" 
volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906583 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906598 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906611 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906623 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906636 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906649 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906661 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906676 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906689 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906702 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906715 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" 
volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906730 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906743 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906756 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906770 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906783 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906796 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906809 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906824 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906837 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906851 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906864 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" 
volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906877 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906890 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906905 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906918 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906931 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906946 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906960 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906974 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906987 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.906999 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907011 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" 
volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907025 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907059 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907073 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907085 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907098 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907111 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907125 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907138 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907152 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907167 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907185 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" 
volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907218 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907233 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907246 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907275 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907290 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907304 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907318 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907361 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907377 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907393 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907408 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" 
volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907422 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907436 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907449 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907465 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907481 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907495 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907509 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907521 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907557 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907571 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907583 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" 
volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907598 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907613 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907631 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907644 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907657 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907671 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907684 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907697 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907711 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907745 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907763 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" 
volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907781 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907794 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907809 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907823 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907838 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907851 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907873 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907887 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907903 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907919 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907933 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" 
volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907946 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907959 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907972 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907986 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.907998 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.908012 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.908025 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.908039 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.908055 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.908081 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.908097 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" 
volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.908112 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.908125 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.908138 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.908152 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.908164 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.908177 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.908190 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.908231 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.908245 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.908258 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.908271 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" 
volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.908285 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.908300 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.908320 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.911437 4701 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.911513 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.911540 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.911587 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.911633 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.911654 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.911673 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.911720 4701 
reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.911739 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.911759 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.911783 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.911905 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.911925 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.911945 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.911996 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.912018 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.912067 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.912131 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.912151 4701 reconstruct.go:130] "Volume is marked as uncertain and 
added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.912171 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.912250 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.912273 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.912292 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.912313 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.912331 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.912352 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.912374 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.912393 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.912432 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.912481 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.912503 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.912551 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.912579 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.912646 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.912695 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.912727 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.912748 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.912769 4701 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.912789 4701 reconstruct.go:97] "Volume reconstruction finished" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.912803 4701 reconciler.go:26] "Reconciler: start to sync state" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.929326 4701 manager.go:324] Recovery completed Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.940615 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.942524 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.942588 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.942607 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.944197 4701 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.944265 4701 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.944329 4701 state_mem.go:36] "Initialized new in-memory state store" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.947991 4701 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.949565 4701 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv6" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.949656 4701 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.949711 4701 kubelet.go:2335] "Starting kubelet main sync loop" Nov 21 19:01:49 crc kubenswrapper[4701]: E1121 19:01:49.949840 4701 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 21 19:01:49 crc kubenswrapper[4701]: W1121 19:01:49.953282 4701 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.12:6443: connect: connection refused Nov 21 19:01:49 crc kubenswrapper[4701]: E1121 19:01:49.953410 4701 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.12:6443: connect: connection refused" logger="UnhandledError" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.966885 4701 policy_none.go:49] "None policy: Start" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.968193 4701 memory_manager.go:170] "Starting memorymanager" policy="None" Nov 21 19:01:49 crc kubenswrapper[4701]: I1121 19:01:49.968292 4701 state_mem.go:35] "Initializing new in-memory state store" Nov 21 19:01:49 crc kubenswrapper[4701]: E1121 19:01:49.982365 4701 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.049819 4701 manager.go:334] "Starting Device Plugin manager" Nov 21 19:01:50 crc kubenswrapper[4701]: E1121 19:01:50.050024 4701 kubelet.go:2359] "Skipping pod synchronization" err="container runtime status check may not have completed yet" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.050052 4701 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.050098 4701 server.go:79] "Starting device plugin registration server" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.050775 4701 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.050791 4701 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 21 19:01:50 crc 
kubenswrapper[4701]: I1121 19:01:50.050974 4701 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.051162 4701 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.051182 4701 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Nov 21 19:01:50 crc kubenswrapper[4701]: E1121 19:01:50.065921 4701 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 21 19:01:50 crc kubenswrapper[4701]: E1121 19:01:50.084194 4701 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.12:6443: connect: connection refused" interval="400ms" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.151886 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.153255 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.153310 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.153330 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.153370 4701 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 21 19:01:50 crc kubenswrapper[4701]: E1121 19:01:50.154087 4701 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.12:6443: connect: connection refused" node="crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.251148 4701 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.251408 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.257104 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.257185 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.257264 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.257566 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.257735 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.257804 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.259396 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.259462 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.259480 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.259491 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.259555 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.259580 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.259697 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.259817 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.259885 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.261237 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.261272 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.261284 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.261305 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.261317 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.261325 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.261723 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.261803 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.261874 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.263138 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.263250 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.263270 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.263286 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.263336 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.263349 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.263572 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.263733 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.263784 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.265111 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.265160 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.265179 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.265466 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.265525 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.265542 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.265840 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.265892 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.267143 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.267184 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.267228 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.318696 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.318762 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.318800 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.318835 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.318922 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.319032 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.319086 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " 
pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.319129 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.319165 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.319196 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.319255 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.319286 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.319314 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.319377 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.319412 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.354917 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.356610 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.356652 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 
21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.356669 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.356704 4701 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 21 19:01:50 crc kubenswrapper[4701]: E1121 19:01:50.357403 4701 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.12:6443: connect: connection refused" node="crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.420939 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.421007 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.421040 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.421072 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.421130 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.421162 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.421168 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.421250 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: 
\"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.421291 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.421339 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.421308 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.421361 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.421390 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.421486 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.421505 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.421512 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.421555 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.421646 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: 
\"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.421588 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.421683 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.421787 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.421808 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.421834 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.421857 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.421865 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.421887 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.421893 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.421932 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod 
\"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.422041 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.421935 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: E1121 19:01:50.485772 4701 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.12:6443: connect: connection refused" interval="800ms" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.591308 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.620550 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.634399 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: W1121 19:01:50.648482 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-95629643d0c7fe8fd53bfc2b8cf8278b76d8599b5621917ecb08fe8e99766378 WatchSource:0}: Error finding container 95629643d0c7fe8fd53bfc2b8cf8278b76d8599b5621917ecb08fe8e99766378: Status 404 returned error can't find the container with id 95629643d0c7fe8fd53bfc2b8cf8278b76d8599b5621917ecb08fe8e99766378 Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.659084 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.667408 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 21 19:01:50 crc kubenswrapper[4701]: W1121 19:01:50.669371 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-b43c031f90e78e1a5f61279e4c67b94fe5b1785923ca75030b5d7ee907ab879c WatchSource:0}: Error finding container b43c031f90e78e1a5f61279e4c67b94fe5b1785923ca75030b5d7ee907ab879c: Status 404 returned error can't find the container with id b43c031f90e78e1a5f61279e4c67b94fe5b1785923ca75030b5d7ee907ab879c Nov 21 19:01:50 crc kubenswrapper[4701]: W1121 19:01:50.673983 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-ab45f5feddaf9f6036b2d6200ac4eb1cd374418106cf20c7cb9a196ec76614f1 WatchSource:0}: Error finding container ab45f5feddaf9f6036b2d6200ac4eb1cd374418106cf20c7cb9a196ec76614f1: Status 404 returned error can't find the container with id ab45f5feddaf9f6036b2d6200ac4eb1cd374418106cf20c7cb9a196ec76614f1 Nov 21 19:01:50 crc kubenswrapper[4701]: W1121 19:01:50.686112 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-ba96ca4c2517603559281a0f310de85ba020e4fcc83d24a8794078944a281917 WatchSource:0}: Error finding container ba96ca4c2517603559281a0f310de85ba020e4fcc83d24a8794078944a281917: Status 404 returned error can't find the container with id ba96ca4c2517603559281a0f310de85ba020e4fcc83d24a8794078944a281917 Nov 21 19:01:50 crc kubenswrapper[4701]: W1121 19:01:50.691579 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-888516f3e0f37550009a7a896b7ff07eaf7c1b81bd7c347eb9e40ee7fa81173e WatchSource:0}: Error finding container 888516f3e0f37550009a7a896b7ff07eaf7c1b81bd7c347eb9e40ee7fa81173e: Status 404 returned error can't find the container with id 888516f3e0f37550009a7a896b7ff07eaf7c1b81bd7c347eb9e40ee7fa81173e Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.758272 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.759822 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.759893 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.759919 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.759966 4701 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 21 19:01:50 crc kubenswrapper[4701]: E1121 19:01:50.760642 4701 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.12:6443: connect: connection refused" node="crc" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.880481 4701 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": 
dial tcp 38.102.83.12:6443: connect: connection refused Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.882484 4701 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-29 20:44:13.420771863 +0000 UTC Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.882574 4701 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 193h42m22.538201347s for next certificate rotation Nov 21 19:01:50 crc kubenswrapper[4701]: W1121 19:01:50.889400 4701 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.12:6443: connect: connection refused Nov 21 19:01:50 crc kubenswrapper[4701]: E1121 19:01:50.889673 4701 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.12:6443: connect: connection refused" logger="UnhandledError" Nov 21 19:01:50 crc kubenswrapper[4701]: W1121 19:01:50.894680 4701 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.12:6443: connect: connection refused Nov 21 19:01:50 crc kubenswrapper[4701]: E1121 19:01:50.894785 4701 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.12:6443: connect: connection refused" logger="UnhandledError" Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.956878 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"b43c031f90e78e1a5f61279e4c67b94fe5b1785923ca75030b5d7ee907ab879c"} Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.959038 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"95629643d0c7fe8fd53bfc2b8cf8278b76d8599b5621917ecb08fe8e99766378"} Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.961052 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"888516f3e0f37550009a7a896b7ff07eaf7c1b81bd7c347eb9e40ee7fa81173e"} Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.963848 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"ba96ca4c2517603559281a0f310de85ba020e4fcc83d24a8794078944a281917"} Nov 21 19:01:50 crc kubenswrapper[4701]: I1121 19:01:50.965171 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"ab45f5feddaf9f6036b2d6200ac4eb1cd374418106cf20c7cb9a196ec76614f1"} Nov 21 
19:01:51 crc kubenswrapper[4701]: W1121 19:01:51.160179 4701 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.12:6443: connect: connection refused Nov 21 19:01:51 crc kubenswrapper[4701]: E1121 19:01:51.160371 4701 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.12:6443: connect: connection refused" logger="UnhandledError" Nov 21 19:01:51 crc kubenswrapper[4701]: E1121 19:01:51.286940 4701 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.12:6443: connect: connection refused" interval="1.6s" Nov 21 19:01:51 crc kubenswrapper[4701]: W1121 19:01:51.461783 4701 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.12:6443: connect: connection refused Nov 21 19:01:51 crc kubenswrapper[4701]: E1121 19:01:51.461912 4701 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.12:6443: connect: connection refused" logger="UnhandledError" Nov 21 19:01:51 crc kubenswrapper[4701]: I1121 19:01:51.561626 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:51 crc kubenswrapper[4701]: I1121 19:01:51.564272 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:51 crc kubenswrapper[4701]: I1121 19:01:51.564334 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:51 crc kubenswrapper[4701]: I1121 19:01:51.564347 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:51 crc kubenswrapper[4701]: I1121 19:01:51.564385 4701 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 21 19:01:51 crc kubenswrapper[4701]: E1121 19:01:51.564712 4701 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.12:6443: connect: connection refused" node="crc" Nov 21 19:01:51 crc kubenswrapper[4701]: I1121 19:01:51.880520 4701 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.12:6443: connect: connection refused Nov 21 19:01:51 crc kubenswrapper[4701]: I1121 19:01:51.969773 4701 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819" exitCode=0 Nov 21 19:01:51 crc kubenswrapper[4701]: I1121 19:01:51.969884 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819"} Nov 21 19:01:51 crc kubenswrapper[4701]: I1121 19:01:51.969923 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:51 crc kubenswrapper[4701]: I1121 19:01:51.971070 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:51 crc kubenswrapper[4701]: I1121 19:01:51.971126 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:51 crc kubenswrapper[4701]: I1121 19:01:51.971143 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:51 crc kubenswrapper[4701]: I1121 19:01:51.973464 4701 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="62447a30421e1b4e0411d6f44dd91d36b3c202f8edaf3fbabe2c10f85dc4f8e9" exitCode=0 Nov 21 19:01:51 crc kubenswrapper[4701]: I1121 19:01:51.973525 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"62447a30421e1b4e0411d6f44dd91d36b3c202f8edaf3fbabe2c10f85dc4f8e9"} Nov 21 19:01:51 crc kubenswrapper[4701]: I1121 19:01:51.973666 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:51 crc kubenswrapper[4701]: I1121 19:01:51.974042 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:51 crc kubenswrapper[4701]: I1121 19:01:51.974790 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:51 crc kubenswrapper[4701]: I1121 19:01:51.974816 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:51 crc kubenswrapper[4701]: I1121 19:01:51.974825 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:51 crc kubenswrapper[4701]: I1121 19:01:51.975114 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:51 crc kubenswrapper[4701]: I1121 19:01:51.975177 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:51 crc kubenswrapper[4701]: I1121 19:01:51.975196 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:51 crc kubenswrapper[4701]: I1121 19:01:51.978585 4701 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="8a2359755af7f5c462ae4fe53417cd97497fea9e1e56ad4d187ed3e2eb1893ef" exitCode=0 Nov 21 19:01:51 crc kubenswrapper[4701]: I1121 19:01:51.978686 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:51 crc kubenswrapper[4701]: I1121 19:01:51.978728 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"8a2359755af7f5c462ae4fe53417cd97497fea9e1e56ad4d187ed3e2eb1893ef"} Nov 21 19:01:51 crc 
kubenswrapper[4701]: I1121 19:01:51.979607 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:51 crc kubenswrapper[4701]: I1121 19:01:51.979665 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:51 crc kubenswrapper[4701]: I1121 19:01:51.979684 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:51 crc kubenswrapper[4701]: I1121 19:01:51.980828 4701 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="09e96a898422e0d1c9e6adcd100761bf262b000ec85367141f258f5a76fd606e" exitCode=0 Nov 21 19:01:51 crc kubenswrapper[4701]: I1121 19:01:51.980877 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:51 crc kubenswrapper[4701]: I1121 19:01:51.980884 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"09e96a898422e0d1c9e6adcd100761bf262b000ec85367141f258f5a76fd606e"} Nov 21 19:01:51 crc kubenswrapper[4701]: I1121 19:01:51.982074 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:51 crc kubenswrapper[4701]: I1121 19:01:51.982114 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:51 crc kubenswrapper[4701]: I1121 19:01:51.982131 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:51 crc kubenswrapper[4701]: I1121 19:01:51.983310 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617"} Nov 21 19:01:51 crc kubenswrapper[4701]: I1121 19:01:51.983392 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c"} Nov 21 19:01:52 crc kubenswrapper[4701]: W1121 19:01:52.870350 4701 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.12:6443: connect: connection refused Nov 21 19:01:52 crc kubenswrapper[4701]: E1121 19:01:52.870446 4701 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.12:6443: connect: connection refused" logger="UnhandledError" Nov 21 19:01:52 crc kubenswrapper[4701]: I1121 19:01:52.880262 4701 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.12:6443: connect: connection refused Nov 21 19:01:52 crc kubenswrapper[4701]: E1121 19:01:52.888411 4701 controller.go:145] "Failed to ensure 
lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.12:6443: connect: connection refused" interval="3.2s" Nov 21 19:01:52 crc kubenswrapper[4701]: I1121 19:01:52.987880 4701 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="a7fe63738ee9760765dbce16bd335fca190df086056f2ce6cb30d1314a1f4bd5" exitCode=0 Nov 21 19:01:52 crc kubenswrapper[4701]: I1121 19:01:52.987939 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"a7fe63738ee9760765dbce16bd335fca190df086056f2ce6cb30d1314a1f4bd5"} Nov 21 19:01:52 crc kubenswrapper[4701]: I1121 19:01:52.988050 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:52 crc kubenswrapper[4701]: I1121 19:01:52.988874 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:52 crc kubenswrapper[4701]: I1121 19:01:52.988902 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:52 crc kubenswrapper[4701]: I1121 19:01:52.988914 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:52 crc kubenswrapper[4701]: I1121 19:01:52.992160 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"e182240704b0db29602ce30da8f686101d2dc12a4867ec6a388198bf6585ed32"} Nov 21 19:01:52 crc kubenswrapper[4701]: I1121 19:01:52.992329 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:52 crc kubenswrapper[4701]: I1121 19:01:52.993372 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:52 crc kubenswrapper[4701]: I1121 19:01:52.993401 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:52 crc kubenswrapper[4701]: I1121 19:01:52.993412 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:53 crc kubenswrapper[4701]: I1121 19:01:53.003654 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"63b3017c15a14fa52dd2d07aedbee017fee93c6d95d96992386779de9bdbbd7c"} Nov 21 19:01:53 crc kubenswrapper[4701]: I1121 19:01:53.003696 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"d0492e473455f1ff5bbac682a6cb03808c7acabceac976f63c5921134406f901"} Nov 21 19:01:53 crc kubenswrapper[4701]: I1121 19:01:53.003710 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"aa3eaa10a4f5d97d32beb3ae165c5482920354befe8671ae1012310f0cc51216"} Nov 21 19:01:53 crc kubenswrapper[4701]: I1121 19:01:53.003814 4701 kubelet_node_status.go:401] "Setting node 
annotation to enable volume controller attach/detach" Nov 21 19:01:53 crc kubenswrapper[4701]: I1121 19:01:53.005298 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:53 crc kubenswrapper[4701]: I1121 19:01:53.005324 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:53 crc kubenswrapper[4701]: I1121 19:01:53.005335 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:53 crc kubenswrapper[4701]: I1121 19:01:53.009518 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8"} Nov 21 19:01:53 crc kubenswrapper[4701]: I1121 19:01:53.009578 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:53 crc kubenswrapper[4701]: I1121 19:01:53.009580 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"5ff2d56a6e954338aa40e9ccacf6ea72f2dd1e66810cca1441497352ae855378"} Nov 21 19:01:53 crc kubenswrapper[4701]: I1121 19:01:53.011635 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:53 crc kubenswrapper[4701]: I1121 19:01:53.011658 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:53 crc kubenswrapper[4701]: I1121 19:01:53.011669 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:53 crc kubenswrapper[4701]: I1121 19:01:53.014178 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1"} Nov 21 19:01:53 crc kubenswrapper[4701]: I1121 19:01:53.014222 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617"} Nov 21 19:01:53 crc kubenswrapper[4701]: I1121 19:01:53.014234 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81"} Nov 21 19:01:53 crc kubenswrapper[4701]: I1121 19:01:53.014244 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612"} Nov 21 19:01:53 crc kubenswrapper[4701]: I1121 19:01:53.165250 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:53 crc kubenswrapper[4701]: I1121 19:01:53.166906 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:53 crc 
kubenswrapper[4701]: I1121 19:01:53.166940 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:53 crc kubenswrapper[4701]: I1121 19:01:53.166948 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:53 crc kubenswrapper[4701]: I1121 19:01:53.166980 4701 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 21 19:01:53 crc kubenswrapper[4701]: E1121 19:01:53.167484 4701 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.12:6443: connect: connection refused" node="crc" Nov 21 19:01:53 crc kubenswrapper[4701]: I1121 19:01:53.749796 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 19:01:54 crc kubenswrapper[4701]: I1121 19:01:54.020454 4701 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="7c017b6680f08a19a1f202057f2d4cb400e286d9d9ef6c2479c2c81ba5a90ebe" exitCode=0 Nov 21 19:01:54 crc kubenswrapper[4701]: I1121 19:01:54.020585 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"7c017b6680f08a19a1f202057f2d4cb400e286d9d9ef6c2479c2c81ba5a90ebe"} Nov 21 19:01:54 crc kubenswrapper[4701]: I1121 19:01:54.020763 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:54 crc kubenswrapper[4701]: I1121 19:01:54.021943 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:54 crc kubenswrapper[4701]: I1121 19:01:54.021983 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:54 crc kubenswrapper[4701]: I1121 19:01:54.022003 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:54 crc kubenswrapper[4701]: I1121 19:01:54.028723 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b"} Nov 21 19:01:54 crc kubenswrapper[4701]: I1121 19:01:54.028755 4701 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 21 19:01:54 crc kubenswrapper[4701]: I1121 19:01:54.028848 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:54 crc kubenswrapper[4701]: I1121 19:01:54.028878 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:54 crc kubenswrapper[4701]: I1121 19:01:54.028982 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:54 crc kubenswrapper[4701]: I1121 19:01:54.028996 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:54 crc kubenswrapper[4701]: I1121 19:01:54.030508 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:54 crc kubenswrapper[4701]: I1121 19:01:54.030542 4701 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:54 crc kubenswrapper[4701]: I1121 19:01:54.030553 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:54 crc kubenswrapper[4701]: I1121 19:01:54.031338 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:54 crc kubenswrapper[4701]: I1121 19:01:54.031360 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:54 crc kubenswrapper[4701]: I1121 19:01:54.031370 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:54 crc kubenswrapper[4701]: I1121 19:01:54.031494 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:54 crc kubenswrapper[4701]: I1121 19:01:54.031553 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:54 crc kubenswrapper[4701]: I1121 19:01:54.031572 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:54 crc kubenswrapper[4701]: I1121 19:01:54.031927 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:54 crc kubenswrapper[4701]: I1121 19:01:54.031973 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:54 crc kubenswrapper[4701]: I1121 19:01:54.031986 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:55 crc kubenswrapper[4701]: I1121 19:01:55.038974 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"0d073e4a28967415f0dc7dc3d38ec69729b57a48a1d48e128b1d1a8e0e7a7aa9"} Nov 21 19:01:55 crc kubenswrapper[4701]: I1121 19:01:55.039073 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"1eec1ead5fc300459c20525faa60092d5abb431217b2b04efe051b5db273a98d"} Nov 21 19:01:55 crc kubenswrapper[4701]: I1121 19:01:55.039092 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:55 crc kubenswrapper[4701]: I1121 19:01:55.039123 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:55 crc kubenswrapper[4701]: I1121 19:01:55.039100 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"af2596742ee71766716af034336ef9c3ce534e980f681f4337d98bca67777f2b"} Nov 21 19:01:55 crc kubenswrapper[4701]: I1121 19:01:55.039255 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 19:01:55 crc kubenswrapper[4701]: I1121 19:01:55.040426 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:55 crc kubenswrapper[4701]: I1121 19:01:55.040462 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:55 crc 
kubenswrapper[4701]: I1121 19:01:55.040472 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:55 crc kubenswrapper[4701]: I1121 19:01:55.041091 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:55 crc kubenswrapper[4701]: I1121 19:01:55.041156 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:55 crc kubenswrapper[4701]: I1121 19:01:55.041188 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:55 crc kubenswrapper[4701]: I1121 19:01:55.260174 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 19:01:56 crc kubenswrapper[4701]: I1121 19:01:56.048868 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"239b6e141ecef1ef6f2ddf8baf8813be33a658256b537b000c085956ab331b6a"} Nov 21 19:01:56 crc kubenswrapper[4701]: I1121 19:01:56.048921 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"1ec056bc105e75fd61d25faefdea38ea114c6ccb15da8365b50ddf62e665f294"} Nov 21 19:01:56 crc kubenswrapper[4701]: I1121 19:01:56.048966 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:56 crc kubenswrapper[4701]: I1121 19:01:56.049054 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:56 crc kubenswrapper[4701]: I1121 19:01:56.049166 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:56 crc kubenswrapper[4701]: I1121 19:01:56.051526 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:56 crc kubenswrapper[4701]: I1121 19:01:56.051606 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:56 crc kubenswrapper[4701]: I1121 19:01:56.051631 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:56 crc kubenswrapper[4701]: I1121 19:01:56.051696 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:56 crc kubenswrapper[4701]: I1121 19:01:56.051738 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:56 crc kubenswrapper[4701]: I1121 19:01:56.051751 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:56 crc kubenswrapper[4701]: I1121 19:01:56.051852 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:56 crc kubenswrapper[4701]: I1121 19:01:56.051878 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:56 crc kubenswrapper[4701]: I1121 19:01:56.051893 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:56 crc kubenswrapper[4701]: 
I1121 19:01:56.368123 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:56 crc kubenswrapper[4701]: I1121 19:01:56.370185 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:56 crc kubenswrapper[4701]: I1121 19:01:56.370278 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:56 crc kubenswrapper[4701]: I1121 19:01:56.370298 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:56 crc kubenswrapper[4701]: I1121 19:01:56.370343 4701 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 21 19:01:56 crc kubenswrapper[4701]: I1121 19:01:56.385629 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Nov 21 19:01:57 crc kubenswrapper[4701]: I1121 19:01:57.052249 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:57 crc kubenswrapper[4701]: I1121 19:01:57.053753 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:57 crc kubenswrapper[4701]: I1121 19:01:57.053814 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:57 crc kubenswrapper[4701]: I1121 19:01:57.053836 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:57 crc kubenswrapper[4701]: I1121 19:01:57.935738 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 19:01:57 crc kubenswrapper[4701]: I1121 19:01:57.936029 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:57 crc kubenswrapper[4701]: I1121 19:01:57.937561 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:57 crc kubenswrapper[4701]: I1121 19:01:57.937609 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:57 crc kubenswrapper[4701]: I1121 19:01:57.937627 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:58 crc kubenswrapper[4701]: I1121 19:01:58.055101 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:58 crc kubenswrapper[4701]: I1121 19:01:58.056664 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:58 crc kubenswrapper[4701]: I1121 19:01:58.056730 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:58 crc kubenswrapper[4701]: I1121 19:01:58.056747 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:58 crc kubenswrapper[4701]: I1121 19:01:58.261018 4701 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting 
headers)" start-of-body= Nov 21 19:01:58 crc kubenswrapper[4701]: I1121 19:01:58.261157 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 21 19:01:58 crc kubenswrapper[4701]: I1121 19:01:58.368295 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 21 19:01:58 crc kubenswrapper[4701]: I1121 19:01:58.368594 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:58 crc kubenswrapper[4701]: I1121 19:01:58.370538 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:58 crc kubenswrapper[4701]: I1121 19:01:58.370607 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:58 crc kubenswrapper[4701]: I1121 19:01:58.370627 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:58 crc kubenswrapper[4701]: I1121 19:01:58.703973 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 19:01:58 crc kubenswrapper[4701]: I1121 19:01:58.704278 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:58 crc kubenswrapper[4701]: I1121 19:01:58.705979 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:58 crc kubenswrapper[4701]: I1121 19:01:58.706052 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:58 crc kubenswrapper[4701]: I1121 19:01:58.706070 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:01:58 crc kubenswrapper[4701]: I1121 19:01:58.867930 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 19:01:58 crc kubenswrapper[4701]: I1121 19:01:58.868278 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:01:58 crc kubenswrapper[4701]: I1121 19:01:58.870063 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:01:58 crc kubenswrapper[4701]: I1121 19:01:58.870133 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:01:58 crc kubenswrapper[4701]: I1121 19:01:58.870161 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:00 crc kubenswrapper[4701]: E1121 19:02:00.066317 4701 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 21 19:02:02 crc kubenswrapper[4701]: I1121 19:02:02.158356 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 19:02:02 crc kubenswrapper[4701]: I1121 
19:02:02.159192 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:02:02 crc kubenswrapper[4701]: I1121 19:02:02.161701 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:02 crc kubenswrapper[4701]: I1121 19:02:02.161765 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:02 crc kubenswrapper[4701]: I1121 19:02:02.161792 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:02 crc kubenswrapper[4701]: I1121 19:02:02.167231 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 19:02:03 crc kubenswrapper[4701]: I1121 19:02:03.069535 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:02:03 crc kubenswrapper[4701]: I1121 19:02:03.070837 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:03 crc kubenswrapper[4701]: I1121 19:02:03.070890 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:03 crc kubenswrapper[4701]: I1121 19:02:03.070907 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:03 crc kubenswrapper[4701]: I1121 19:02:03.076746 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 19:02:03 crc kubenswrapper[4701]: W1121 19:02:03.802904 4701 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout Nov 21 19:02:03 crc kubenswrapper[4701]: I1121 19:02:03.803053 4701 trace.go:236] Trace[20146223]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (21-Nov-2025 19:01:53.801) (total time: 10001ms): Nov 21 19:02:03 crc kubenswrapper[4701]: Trace[20146223]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (19:02:03.802) Nov 21 19:02:03 crc kubenswrapper[4701]: Trace[20146223]: [10.001324701s] [10.001324701s] END Nov 21 19:02:03 crc kubenswrapper[4701]: E1121 19:02:03.803090 4701 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Nov 21 19:02:03 crc kubenswrapper[4701]: I1121 19:02:03.828336 4701 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:44554->192.168.126.11:17697: read: connection reset by peer" start-of-body= Nov 21 19:02:03 crc kubenswrapper[4701]: I1121 19:02:03.828428 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" 
containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:44554->192.168.126.11:17697: read: connection reset by peer" Nov 21 19:02:03 crc kubenswrapper[4701]: I1121 19:02:03.880534 4701 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Nov 21 19:02:03 crc kubenswrapper[4701]: I1121 19:02:03.995279 4701 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 21 19:02:03 crc kubenswrapper[4701]: I1121 19:02:03.995356 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 21 19:02:04 crc kubenswrapper[4701]: I1121 19:02:04.002969 4701 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 21 19:02:04 crc kubenswrapper[4701]: I1121 19:02:04.003040 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 21 19:02:04 crc kubenswrapper[4701]: I1121 19:02:04.073522 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 21 19:02:04 crc kubenswrapper[4701]: I1121 19:02:04.075325 4701 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b" exitCode=255 Nov 21 19:02:04 crc kubenswrapper[4701]: I1121 19:02:04.075487 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:02:04 crc kubenswrapper[4701]: I1121 19:02:04.075991 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b"} Nov 21 19:02:04 crc kubenswrapper[4701]: I1121 19:02:04.076287 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:04 crc kubenswrapper[4701]: I1121 19:02:04.076326 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:04 crc kubenswrapper[4701]: I1121 19:02:04.076337 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:04 crc 
kubenswrapper[4701]: I1121 19:02:04.076421 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:02:04 crc kubenswrapper[4701]: I1121 19:02:04.077545 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:04 crc kubenswrapper[4701]: I1121 19:02:04.077596 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:04 crc kubenswrapper[4701]: I1121 19:02:04.077606 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:04 crc kubenswrapper[4701]: I1121 19:02:04.078237 4701 scope.go:117] "RemoveContainer" containerID="3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b" Nov 21 19:02:04 crc kubenswrapper[4701]: I1121 19:02:04.417528 4701 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 19:02:04 crc kubenswrapper[4701]: I1121 19:02:04.849156 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Nov 21 19:02:04 crc kubenswrapper[4701]: I1121 19:02:04.850092 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:02:04 crc kubenswrapper[4701]: I1121 19:02:04.851475 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:04 crc kubenswrapper[4701]: I1121 19:02:04.851558 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:04 crc kubenswrapper[4701]: I1121 19:02:04.851589 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:04 crc kubenswrapper[4701]: I1121 19:02:04.903292 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Nov 21 19:02:05 crc kubenswrapper[4701]: I1121 19:02:05.080670 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 21 19:02:05 crc kubenswrapper[4701]: I1121 19:02:05.082275 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:02:05 crc kubenswrapper[4701]: I1121 19:02:05.082938 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:02:05 crc kubenswrapper[4701]: I1121 19:02:05.083227 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1"} Nov 21 19:02:05 crc kubenswrapper[4701]: I1121 19:02:05.083644 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:05 crc kubenswrapper[4701]: I1121 19:02:05.083668 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:05 crc kubenswrapper[4701]: I1121 19:02:05.083677 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:05 crc kubenswrapper[4701]: I1121 19:02:05.084182 4701 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:05 crc kubenswrapper[4701]: I1121 19:02:05.084218 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:05 crc kubenswrapper[4701]: I1121 19:02:05.084226 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:05 crc kubenswrapper[4701]: I1121 19:02:05.096137 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Nov 21 19:02:06 crc kubenswrapper[4701]: I1121 19:02:06.085945 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:02:06 crc kubenswrapper[4701]: I1121 19:02:06.085999 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 19:02:06 crc kubenswrapper[4701]: I1121 19:02:06.085945 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:02:06 crc kubenswrapper[4701]: I1121 19:02:06.087691 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:06 crc kubenswrapper[4701]: I1121 19:02:06.087752 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:06 crc kubenswrapper[4701]: I1121 19:02:06.087775 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:06 crc kubenswrapper[4701]: I1121 19:02:06.087716 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:06 crc kubenswrapper[4701]: I1121 19:02:06.088180 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:06 crc kubenswrapper[4701]: I1121 19:02:06.088433 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:07 crc kubenswrapper[4701]: I1121 19:02:07.093062 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:02:07 crc kubenswrapper[4701]: I1121 19:02:07.094285 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:07 crc kubenswrapper[4701]: I1121 19:02:07.094350 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:07 crc kubenswrapper[4701]: I1121 19:02:07.094389 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:07 crc kubenswrapper[4701]: I1121 19:02:07.108613 4701 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 21 19:02:08 crc kubenswrapper[4701]: I1121 19:02:08.261738 4701 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 21 19:02:08 crc kubenswrapper[4701]: I1121 19:02:08.261817 4701 prober.go:107] "Probe failed" probeType="Startup" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 21 19:02:08 crc kubenswrapper[4701]: I1121 19:02:08.876532 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 19:02:08 crc kubenswrapper[4701]: I1121 19:02:08.876722 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:02:08 crc kubenswrapper[4701]: I1121 19:02:08.878467 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:08 crc kubenswrapper[4701]: I1121 19:02:08.878500 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:08 crc kubenswrapper[4701]: I1121 19:02:08.878512 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:08 crc kubenswrapper[4701]: I1121 19:02:08.884261 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 19:02:08 crc kubenswrapper[4701]: E1121 19:02:08.988949 4701 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s" Nov 21 19:02:08 crc kubenswrapper[4701]: I1121 19:02:08.992053 4701 trace.go:236] Trace[1501535633]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (21-Nov-2025 19:01:58.339) (total time: 10652ms): Nov 21 19:02:08 crc kubenswrapper[4701]: Trace[1501535633]: ---"Objects listed" error: 10652ms (19:02:08.991) Nov 21 19:02:08 crc kubenswrapper[4701]: Trace[1501535633]: [10.652195884s] [10.652195884s] END Nov 21 19:02:08 crc kubenswrapper[4701]: I1121 19:02:08.992095 4701 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 21 19:02:08 crc kubenswrapper[4701]: I1121 19:02:08.994770 4701 trace.go:236] Trace[1257978531]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (21-Nov-2025 19:01:54.005) (total time: 14989ms): Nov 21 19:02:08 crc kubenswrapper[4701]: Trace[1257978531]: ---"Objects listed" error: 14989ms (19:02:08.994) Nov 21 19:02:08 crc kubenswrapper[4701]: Trace[1257978531]: [14.989125865s] [14.989125865s] END Nov 21 19:02:08 crc kubenswrapper[4701]: I1121 19:02:08.994820 4701 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 21 19:02:08 crc kubenswrapper[4701]: I1121 19:02:08.994919 4701 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Nov 21 19:02:08 crc kubenswrapper[4701]: I1121 19:02:08.995443 4701 trace.go:236] Trace[2043759258]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (21-Nov-2025 19:01:54.211) (total time: 14783ms): Nov 21 19:02:08 crc kubenswrapper[4701]: Trace[2043759258]: ---"Objects listed" error: 14783ms (19:02:08.995) Nov 21 19:02:08 crc kubenswrapper[4701]: Trace[2043759258]: [14.783543373s] [14.783543373s] END Nov 21 19:02:08 crc kubenswrapper[4701]: I1121 19:02:08.995481 4701 reflector.go:368] Caches populated for 
*v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 21 19:02:08 crc kubenswrapper[4701]: E1121 19:02:08.996013 4701 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.865714 4701 apiserver.go:52] "Watching apiserver" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.869088 4701 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.869638 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-kube-apiserver/kube-apiserver-crc","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h"] Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.870132 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.871003 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:02:09 crc kubenswrapper[4701]: E1121 19:02:09.871096 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.872344 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.872465 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:02:09 crc kubenswrapper[4701]: E1121 19:02:09.872827 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.873236 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.873550 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.874052 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.874326 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:09 crc kubenswrapper[4701]: E1121 19:02:09.874514 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.878429 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.878828 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.878855 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.879189 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.879314 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.879368 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.879962 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.883372 4701 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.901445 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.901498 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.901534 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.901561 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.901589 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.901621 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.901650 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.901680 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.901709 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.901737 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.901767 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.901800 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.901831 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.901861 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started 
for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.901889 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.901998 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.902025 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.902050 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.902073 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.902094 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.902115 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.902135 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.902156 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.902179 4701 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.902365 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.902407 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.902437 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.902465 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.902493 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.902522 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.902561 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.902571 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.902618 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.902640 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.902666 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.902696 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.902728 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.902748 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.902776 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.902809 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.902850 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod 
\"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.902883 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.902918 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.902948 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.902978 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.903011 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.903043 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.903072 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.903103 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.903192 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.903250 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod 
\"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.903332 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.903366 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.903397 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.903428 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.903461 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.903494 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.903522 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.903552 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.903586 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.903615 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: 
\"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.903643 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.903674 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.903707 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.903736 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.903764 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.903808 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.903839 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.903871 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.903904 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.903941 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.905334 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.905381 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.905416 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.905449 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.905482 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.905517 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.905555 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.905588 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.905618 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.905753 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.905840 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.905966 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.906027 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.906064 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.906120 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.906152 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.906183 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.906237 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.906274 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.906306 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.906398 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.906446 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.906543 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.906601 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.906637 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.906671 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.906703 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.906741 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.906776 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 21 19:02:09 
crc kubenswrapper[4701]: I1121 19:02:09.906809 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.906841 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.906873 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.906908 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.906944 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.906977 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.907010 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.907044 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.907075 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.907109 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod 
\"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.907142 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.907178 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.907231 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.907266 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.907337 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.907378 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.907413 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.907446 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.907479 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.907511 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: 
\"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.907546 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.907581 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.907614 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.907646 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.907677 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.907709 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.907745 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.907776 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.907810 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.907841 4701 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.907879 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.907912 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.907945 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.907977 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.908015 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.908047 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.908083 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.908118 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.908163 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 
19:02:09.908195 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.908252 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.908290 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.908323 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.908354 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.908443 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.908478 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.908516 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.908550 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.908586 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.908623 
4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.908654 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.908685 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.908721 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.908827 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.908864 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.908903 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.908937 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.908971 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.909006 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 21 
19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.909039 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.909072 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.909114 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.909149 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.909180 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.909246 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.909281 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.909313 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.909350 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.909383 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 21 19:02:09 
crc kubenswrapper[4701]: I1121 19:02:09.909420 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.909452 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.909486 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.909523 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.909626 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.909738 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.909787 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.909819 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.909849 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.909888 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 
19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.909923 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.910012 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.910052 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.910090 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.910122 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.910156 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.910192 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.910252 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.910291 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.910326 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: 
\"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.910362 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.902863 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.903270 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.903263 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.903366 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.903550 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.903708 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.903735 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.903816 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.903884 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.903985 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.903991 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.904099 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.907975 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.907996 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.908402 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.908685 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.910663 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.908792 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.908824 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.909106 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.909132 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.909756 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.909822 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.911184 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.911328 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.911757 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.911884 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.912106 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.912443 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.912510 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.913537 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.913632 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.913665 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.913687 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: E1121 19:02:09.913914 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:02:10.413866289 +0000 UTC m=+21.199006326 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.913971 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.914241 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.910399 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.914435 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.914498 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.914392 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.915061 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.915111 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.915179 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.915274 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.915327 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.915340 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.915436 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.915510 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.915571 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.915633 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.915693 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.915751 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.915764 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.915910 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.915977 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.915960 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.916176 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.916272 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.916413 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.916587 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.916837 4701 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.917186 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.917183 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.917252 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.917253 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:09 crc kubenswrapper[4701]: E1121 19:02:09.917354 4701 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.917347 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.917410 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:09 crc kubenswrapper[4701]: E1121 19:02:09.917423 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 19:02:10.417411006 +0000 UTC m=+21.202551043 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.917466 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.917518 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: E1121 19:02:09.917696 4701 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 19:02:09 crc kubenswrapper[4701]: E1121 19:02:09.917737 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 19:02:10.417727301 +0000 UTC m=+21.202867338 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.917741 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.917773 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.917806 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.917832 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.917854 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.917886 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.917912 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.918409 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.918673 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.919184 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.919402 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.919654 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.919925 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.920626 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.920650 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.920811 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.920933 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.921109 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.921154 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.921178 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.921268 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.921426 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.921556 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.921621 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.922351 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.922568 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.922617 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.922730 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.923369 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.923403 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.923664 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.923711 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.923762 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.923828 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.925253 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.926583 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.927406 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.927422 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.927492 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.927592 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.928237 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.928305 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.928563 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.928570 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.928756 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.930121 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.930281 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.932286 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.932321 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.932330 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.933277 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.933335 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.933538 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.934039 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.934064 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.934413 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.934816 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.935000 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.935026 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.935040 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.935053 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.935068 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.935081 4701 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.935095 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.935109 4701 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.935125 4701 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.935144 4701 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.935162 4701 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.935180 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.935193 4701 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.935226 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.935238 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.935250 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.935263 4701 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.935275 4701 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.935268 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.935292 4701 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.935485 4701 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.935644 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.935752 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.935975 4701 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.936077 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.936194 4701 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.936302 4701 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.936338 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.936429 4701 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.936458 4701 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.936485 4701 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.936514 4701 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.936570 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.936601 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.936692 4701 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: 
\"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.936724 4701 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.936753 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.936782 4701 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.936801 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.936811 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.936852 4701 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.936868 4701 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.936885 4701 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.936905 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.936924 4701 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.936943 4701 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.937435 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod 
"5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.937742 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.938476 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.938791 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.938949 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.939057 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.940174 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.948394 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.948489 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.949886 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.950336 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.951261 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: E1121 19:02:09.951562 4701 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 19:02:09 crc kubenswrapper[4701]: E1121 19:02:09.951585 4701 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 19:02:09 crc kubenswrapper[4701]: E1121 19:02:09.951601 4701 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 19:02:09 crc kubenswrapper[4701]: E1121 19:02:09.951669 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-21 19:02:10.451647568 +0000 UTC m=+21.236787605 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.951847 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.952123 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.952164 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.952329 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.953076 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.953118 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.952431 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.952893 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: E1121 19:02:09.953250 4701 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 19:02:09 crc kubenswrapper[4701]: E1121 19:02:09.953900 4701 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 19:02:09 crc kubenswrapper[4701]: E1121 19:02:09.953928 4701 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 19:02:09 crc kubenswrapper[4701]: E1121 19:02:09.954043 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-21 19:02:10.454020266 +0000 UTC m=+21.239160303 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.954665 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.954694 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.954930 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.954986 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.954983 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.955281 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.955760 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.956618 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.956756 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.956949 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.958935 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.960521 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.960777 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.961733 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.961990 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.962528 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.963054 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.963080 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.963314 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.963800 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.964766 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.965481 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.965614 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.965932 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.966642 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.966663 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.966882 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.966932 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.967127 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.967462 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.967156 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.969285 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.968875 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.969068 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.967884 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.969407 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.973790 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.973943 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.974071 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.974079 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.974232 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.974717 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.974962 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.975330 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.976133 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.976246 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.976511 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.976967 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.977419 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.977494 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.977691 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.978809 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.978809 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.978811 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.979139 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.979561 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.979688 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.979780 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.982697 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.983086 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.983399 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.983424 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.983730 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.983760 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.984015 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.984047 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.984222 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.984294 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.984645 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.984703 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.984829 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.984902 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.985857 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.986038 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.987253 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.989979 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.990862 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.993260 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.994669 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.996049 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.996179 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.997495 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 21 19:02:09 crc kubenswrapper[4701]: I1121 19:02:09.998717 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.000249 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.001612 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.001822 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.003008 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.006236 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.007482 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.008962 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.009933 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.010604 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.012008 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.012781 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.013392 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.015042 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Nov 21 19:02:10 crc 
kubenswrapper[4701]: I1121 19:02:10.015271 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.015641 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.015789 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.016160 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.017573 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.018943 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.019574 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.019640 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.020117 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.020964 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.021484 4701 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.021612 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.023637 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.024148 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.024570 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.026046 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 
21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.027015 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.027513 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.028482 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.029094 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.030056 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.030658 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.031980 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.032825 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 
19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.033360 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.033962 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.035241 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.035972 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.037396 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.037982 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.038535 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.039435 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.039979 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.040607 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.041496 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.042258 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.042340 4701 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.042426 4701 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.042449 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.042464 4701 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.042480 4701 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.042523 4701 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.042546 4701 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.042559 4701 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.042573 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.042590 4701 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.042602 4701 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.042614 4701 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.042626 4701 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 
crc kubenswrapper[4701]: I1121 19:02:10.042638 4701 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.042649 4701 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.042661 4701 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.042673 4701 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.042685 4701 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.042696 4701 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.042708 4701 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.042720 4701 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.042733 4701 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.042747 4701 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.042759 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.042773 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.042941 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.043328 4701 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.043349 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.043385 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.043633 4701 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.043718 4701 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.043802 4701 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.043874 4701 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.043947 4701 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.044013 4701 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.044072 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.044124 4701 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.044185 4701 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.044271 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc 
kubenswrapper[4701]: I1121 19:02:10.044326 4701 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.044378 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.044442 4701 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.044496 4701 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.044553 4701 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.044605 4701 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.044657 4701 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.044712 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.044769 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.044841 4701 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.044901 4701 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.044954 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.045006 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.045133 4701 
reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.045242 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.045382 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.045482 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.045541 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.045593 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.045646 4701 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.045699 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.045762 4701 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.045822 4701 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.045879 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.045932 4701 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.045992 4701 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: 
\"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.046085 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.046146 4701 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.046227 4701 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.046292 4701 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.046355 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.046410 4701 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.046466 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.046519 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.046570 4701 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.046620 4701 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.046671 4701 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.046730 4701 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.046791 4701 
reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.046845 4701 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.046898 4701 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.046949 4701 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.046999 4701 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.047058 4701 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.047112 4701 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.047163 4701 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.047243 4701 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.047304 4701 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.047357 4701 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.047482 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.047536 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.047589 4701 reconciler_common.go:293] "Volume detached for volume 
\"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.047641 4701 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.047698 4701 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.047751 4701 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.047808 4701 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.047861 4701 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.047912 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.047964 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.048015 4701 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.048077 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.048130 4701 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.048181 4701 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.048267 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.048327 4701 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" 
(UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.048378 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.048433 4701 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.048486 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.048538 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.048589 4701 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.048638 4701 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.048696 4701 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.048749 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.048804 4701 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.048858 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.048911 4701 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.048960 4701 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.049019 4701 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: 
\"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.049070 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.049119 4701 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.049170 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.049248 4701 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.049307 4701 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.049368 4701 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.049421 4701 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.049472 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.049529 4701 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.049579 4701 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.049638 4701 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.049696 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.049748 4701 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: 
\"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.049799 4701 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.049853 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.049909 4701 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.049968 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.050022 4701 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.050100 4701 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.050157 4701 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.050231 4701 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.050312 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.050374 4701 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.050428 4701 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.050479 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.050531 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: 
\"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.050587 4701 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.050647 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.051132 4701 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.051189 4701 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.051274 4701 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.051330 4701 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.051383 4701 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.051444 4701 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.051503 4701 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.051558 4701 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.051609 4701 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.051666 4701 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.055875 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.064597 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.074463 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 
19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.085638 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.096555 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.107411 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.123998 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.136392 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.195134 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 21 19:02:10 crc kubenswrapper[4701]: W1121 19:02:10.214394 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-424e652489d85b9abb94b33993eeb77ab8e0efbc415c7650a5efcd877cafb6eb WatchSource:0}: Error finding container 424e652489d85b9abb94b33993eeb77ab8e0efbc415c7650a5efcd877cafb6eb: Status 404 returned error can't find the container with id 424e652489d85b9abb94b33993eeb77ab8e0efbc415c7650a5efcd877cafb6eb Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.215779 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 21 19:02:10 crc kubenswrapper[4701]: W1121 19:02:10.248802 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-f4690f8d4ef9d3ad1c9fba9d5fc24cf8a98ba9b4f6035c0f2068743af8899996 WatchSource:0}: Error finding container f4690f8d4ef9d3ad1c9fba9d5fc24cf8a98ba9b4f6035c0f2068743af8899996: Status 404 returned error can't find the container with id f4690f8d4ef9d3ad1c9fba9d5fc24cf8a98ba9b4f6035c0f2068743af8899996 Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.249935 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.457981 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.458059 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.458086 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.458117 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.458144 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: 
\"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:02:10 crc kubenswrapper[4701]: E1121 19:02:10.458293 4701 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 19:02:10 crc kubenswrapper[4701]: E1121 19:02:10.458313 4701 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 19:02:10 crc kubenswrapper[4701]: E1121 19:02:10.458328 4701 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 19:02:10 crc kubenswrapper[4701]: E1121 19:02:10.458380 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-21 19:02:11.458364458 +0000 UTC m=+22.243504495 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 19:02:10 crc kubenswrapper[4701]: E1121 19:02:10.458470 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:02:11.45846183 +0000 UTC m=+22.243601877 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:02:10 crc kubenswrapper[4701]: E1121 19:02:10.458508 4701 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 19:02:10 crc kubenswrapper[4701]: E1121 19:02:10.458535 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 19:02:11.458527282 +0000 UTC m=+22.243667319 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 19:02:10 crc kubenswrapper[4701]: E1121 19:02:10.458588 4701 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 19:02:10 crc kubenswrapper[4701]: E1121 19:02:10.458615 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 19:02:11.458608183 +0000 UTC m=+22.243748220 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 19:02:10 crc kubenswrapper[4701]: E1121 19:02:10.458669 4701 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 19:02:10 crc kubenswrapper[4701]: E1121 19:02:10.458680 4701 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 19:02:10 crc kubenswrapper[4701]: E1121 19:02:10.458690 4701 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 19:02:10 crc kubenswrapper[4701]: E1121 19:02:10.458717 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-21 19:02:11.458709415 +0000 UTC m=+22.243849462 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 19:02:10 crc kubenswrapper[4701]: I1121 19:02:10.950369 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:02:10 crc kubenswrapper[4701]: E1121 19:02:10.950498 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 19:02:11 crc kubenswrapper[4701]: I1121 19:02:11.107282 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30"} Nov 21 19:02:11 crc kubenswrapper[4701]: I1121 19:02:11.107386 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"424e652489d85b9abb94b33993eeb77ab8e0efbc415c7650a5efcd877cafb6eb"} Nov 21 19:02:11 crc kubenswrapper[4701]: I1121 19:02:11.108875 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"5d444df9df64801043aaefd24c65ca009e9def6462a835457d45f9a60a43b07a"} Nov 21 19:02:11 crc kubenswrapper[4701]: I1121 19:02:11.112308 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e"} Nov 21 19:02:11 crc kubenswrapper[4701]: I1121 19:02:11.112363 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0"} Nov 21 19:02:11 crc kubenswrapper[4701]: I1121 19:02:11.112384 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"f4690f8d4ef9d3ad1c9fba9d5fc24cf8a98ba9b4f6035c0f2068743af8899996"} Nov 21 19:02:11 crc kubenswrapper[4701]: I1121 19:02:11.132401 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 
19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:11Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:11 crc kubenswrapper[4701]: I1121 19:02:11.154759 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:11Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:11 crc kubenswrapper[4701]: I1121 19:02:11.176675 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:11Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:11 crc kubenswrapper[4701]: I1121 19:02:11.200720 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:11Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:11 crc kubenswrapper[4701]: I1121 19:02:11.223159 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:11Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:11 crc kubenswrapper[4701]: I1121 19:02:11.246836 4701 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:11Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:11 crc kubenswrapper[4701]: I1121 19:02:11.267712 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:11Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:11 crc kubenswrapper[4701]: I1121 19:02:11.293111 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready 
status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:11Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:11 crc kubenswrapper[4701]: I1121 19:02:11.312525 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:11Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:11 crc kubenswrapper[4701]: I1121 19:02:11.335837 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 
19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:11Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:11 crc kubenswrapper[4701]: I1121 19:02:11.355838 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:11Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:11 crc kubenswrapper[4701]: I1121 19:02:11.376107 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:11Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:11 crc kubenswrapper[4701]: I1121 19:02:11.401083 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:11Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:11 crc kubenswrapper[4701]: I1121 19:02:11.430934 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:11Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:11 crc kubenswrapper[4701]: I1121 19:02:11.466848 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:02:11 crc kubenswrapper[4701]: I1121 19:02:11.466964 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:11 crc kubenswrapper[4701]: I1121 19:02:11.467019 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:11 crc kubenswrapper[4701]: E1121 19:02:11.467084 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:02:13.467044795 +0000 UTC m=+24.252184862 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:02:11 crc kubenswrapper[4701]: E1121 19:02:11.467152 4701 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 19:02:11 crc kubenswrapper[4701]: I1121 19:02:11.467160 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:02:11 crc kubenswrapper[4701]: E1121 19:02:11.467305 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 19:02:13.467279789 +0000 UTC m=+24.252419856 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 19:02:11 crc kubenswrapper[4701]: E1121 19:02:11.467354 4701 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 19:02:11 crc kubenswrapper[4701]: E1121 19:02:11.467508 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 19:02:13.467461222 +0000 UTC m=+24.252601469 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 19:02:11 crc kubenswrapper[4701]: E1121 19:02:11.467538 4701 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 19:02:11 crc kubenswrapper[4701]: E1121 19:02:11.467551 4701 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 19:02:11 crc kubenswrapper[4701]: E1121 19:02:11.467619 4701 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 19:02:11 crc kubenswrapper[4701]: E1121 19:02:11.467565 4701 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 19:02:11 crc kubenswrapper[4701]: E1121 19:02:11.467642 4701 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 19:02:11 crc kubenswrapper[4701]: E1121 19:02:11.467664 4701 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 19:02:11 crc kubenswrapper[4701]: E1121 19:02:11.467734 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-21 19:02:13.467716156 +0000 UTC m=+24.252856443 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 19:02:11 crc kubenswrapper[4701]: E1121 19:02:11.467764 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-21 19:02:13.467750236 +0000 UTC m=+24.252890303 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 19:02:11 crc kubenswrapper[4701]: I1121 19:02:11.467285 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:02:11 crc kubenswrapper[4701]: I1121 19:02:11.950518 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:02:11 crc kubenswrapper[4701]: I1121 19:02:11.950617 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:11 crc kubenswrapper[4701]: E1121 19:02:11.950731 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 19:02:11 crc kubenswrapper[4701]: E1121 19:02:11.950883 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 19:02:11 crc kubenswrapper[4701]: I1121 19:02:11.958419 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 21 19:02:11 crc kubenswrapper[4701]: I1121 19:02:11.959791 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Nov 21 19:02:12 crc kubenswrapper[4701]: I1121 19:02:12.949977 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:02:12 crc kubenswrapper[4701]: E1121 19:02:12.950171 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 19:02:13 crc kubenswrapper[4701]: I1121 19:02:13.485662 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:02:13 crc kubenswrapper[4701]: I1121 19:02:13.485781 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:02:13 crc kubenswrapper[4701]: I1121 19:02:13.485826 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:02:13 crc kubenswrapper[4701]: E1121 19:02:13.485870 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:02:17.485820709 +0000 UTC m=+28.270960776 (durationBeforeRetry 4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:02:13 crc kubenswrapper[4701]: I1121 19:02:13.485935 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:13 crc kubenswrapper[4701]: E1121 19:02:13.485995 4701 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 19:02:13 crc kubenswrapper[4701]: I1121 19:02:13.486002 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:13 crc kubenswrapper[4701]: E1121 19:02:13.486018 4701 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 19:02:13 crc kubenswrapper[4701]: E1121 19:02:13.486142 4701 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 19:02:13 crc kubenswrapper[4701]: E1121 19:02:13.486166 4701 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 19:02:13 crc kubenswrapper[4701]: E1121 19:02:13.486054 4701 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 19:02:13 crc kubenswrapper[4701]: E1121 19:02:13.486267 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-21 19:02:17.486242096 +0000 UTC m=+28.271382193 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 19:02:13 crc kubenswrapper[4701]: E1121 19:02:13.486270 4701 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 19:02:13 crc kubenswrapper[4701]: E1121 19:02:13.486306 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 19:02:17.486289506 +0000 UTC m=+28.271429573 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 19:02:13 crc kubenswrapper[4701]: E1121 19:02:13.486322 4701 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 19:02:13 crc kubenswrapper[4701]: E1121 19:02:13.486391 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-21 19:02:17.486367408 +0000 UTC m=+28.271507465 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 19:02:13 crc kubenswrapper[4701]: E1121 19:02:13.486078 4701 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 19:02:13 crc kubenswrapper[4701]: E1121 19:02:13.486469 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 19:02:17.486450049 +0000 UTC m=+28.271590116 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 19:02:13 crc kubenswrapper[4701]: I1121 19:02:13.950892 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:13 crc kubenswrapper[4701]: I1121 19:02:13.950940 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:02:13 crc kubenswrapper[4701]: E1121 19:02:13.951095 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 19:02:13 crc kubenswrapper[4701]: E1121 19:02:13.951252 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 19:02:14 crc kubenswrapper[4701]: I1121 19:02:14.950078 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:02:14 crc kubenswrapper[4701]: E1121 19:02:14.950223 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.128129 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437"} Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.144403 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\
"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.155757 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.168810 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.214532 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.253266 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.264466 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.270357 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.275319 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20994829
19d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.293429 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.308893 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 
19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.321802 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-hb64h"] Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.322134 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-hb64h" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.324032 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.324601 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.324834 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.324893 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.336556 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.352019 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.363390 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.367233 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.381904 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.396514 4701 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.398139 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.398192 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.398228 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.398328 4701 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.403028 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/c769c5d1-60d9-43e1-b130-4373c7eae670-hosts-file\") pod \"node-resolver-hb64h\" (UID: \"c769c5d1-60d9-43e1-b130-4373c7eae670\") " pod="openshift-dns/node-resolver-hb64h" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.403084 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7rz7r\" (UniqueName: \"kubernetes.io/projected/c769c5d1-60d9-43e1-b130-4373c7eae670-kube-api-access-7rz7r\") pod \"node-resolver-hb64h\" (UID: \"c769c5d1-60d9-43e1-b130-4373c7eae670\") " pod="openshift-dns/node-resolver-hb64h" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.403762 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.406426 4701 kubelet_node_status.go:115] "Node was previously registered" node="crc" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.406674 4701 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.407698 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:15 crc 
kubenswrapper[4701]: I1121 19:02:15.407743 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.407753 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.407769 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.407779 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:15Z","lastTransitionTime":"2025-11-21T19:02:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.415671 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: E1121 19:02:15.425396 4701 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae
669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3bda9678-f6a5-4de4-acaa-3527a0be80fa\\\",\\\"systemUUID\\\":\\\"5ab738c4-0d34-41bd-a531-77773953d838\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.428312 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.428337 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.428345 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.428361 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.428370 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:15Z","lastTransitionTime":"2025-11-21T19:02:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.432576 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: E1121 19:02:15.441284 4701 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"kubelet has no disk 
pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeByt
es\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3bda9678-f6a5-4de4-acaa-3527a0be80fa\\\",\\\"systemUUID\\\":\\\"5
ab738c4-0d34-41bd-a531-77773953d838\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.444363 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.444398 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.444408 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.444424 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.444434 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:15Z","lastTransitionTime":"2025-11-21T19:02:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.445309 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.462406 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: E1121 19:02:15.462854 4701 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"kubelet has no disk 
pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeByt
es\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3bda9678-f6a5-4de4-acaa-3527a0be80fa\\\",\\\"systemUUID\\\":\\\"5
ab738c4-0d34-41bd-a531-77773953d838\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.466188 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.466247 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.466257 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.466272 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.466281 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:15Z","lastTransitionTime":"2025-11-21T19:02:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.480407 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 
19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: E1121 19:02:15.485966 4701 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3bda9678-f6a5-4de4-acaa-3527a0be80fa\\\",\\\"systemUUID\\\":\\\"5ab738c4-0d34-41bd-a531-77773953d838\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.491345 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.491382 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.491391 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.491405 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.491414 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:15Z","lastTransitionTime":"2025-11-21T19:02:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.493420 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45e6862c-1f97-44f6-bae7-1f3bcb8a6671\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-c
erts\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ff2d56a6e954338aa40e9ccacf6ea72f2dd1e66810cca1441497352ae855378\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.503712 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/c769c5d1-60d9-43e1-b130-4373c7eae670-hosts-file\") pod \"node-resolver-hb64h\" (UID: \"c769c5d1-60d9-43e1-b130-4373c7eae670\") " pod="openshift-dns/node-resolver-hb64h" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.503760 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7rz7r\" (UniqueName: \"kubernetes.io/projected/c769c5d1-60d9-43e1-b130-4373c7eae670-kube-api-access-7rz7r\") pod \"node-resolver-hb64h\" (UID: \"c769c5d1-60d9-43e1-b130-4373c7eae670\") " pod="openshift-dns/node-resolver-hb64h" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.503869 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/c769c5d1-60d9-43e1-b130-4373c7eae670-hosts-file\") pod \"node-resolver-hb64h\" (UID: \"c769c5d1-60d9-43e1-b130-4373c7eae670\") " 
pod="openshift-dns/node-resolver-hb64h" Nov 21 19:02:15 crc kubenswrapper[4701]: E1121 19:02:15.504657 4701 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3bda9678-f6a5-4de4-acaa-3527a0be80fa\\\",\\\"systemUUID\\\":\\\"5ab738c4-0d34-41bd-a531-77773953d838\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: E1121 19:02:15.504765 4701 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.506156 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.506183 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.506245 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.506262 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.506272 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:15Z","lastTransitionTime":"2025-11-21T19:02:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.506722 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.519961 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.520268 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7rz7r\" (UniqueName: \"kubernetes.io/projected/c769c5d1-60d9-43e1-b130-4373c7eae670-kube-api-access-7rz7r\") pod \"node-resolver-hb64h\" (UID: 
\"c769c5d1-60d9-43e1-b130-4373c7eae670\") " pod="openshift-dns/node-resolver-hb64h" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.531510 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hb64h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c769c5d1-60d9-43e1-b130-4373c7eae670\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rz7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hb64h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.608889 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.608928 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.608939 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.608962 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.608974 4701 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:15Z","lastTransitionTime":"2025-11-21T19:02:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.633345 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-hb64h" Nov 21 19:02:15 crc kubenswrapper[4701]: W1121 19:02:15.653098 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc769c5d1_60d9_43e1_b130_4373c7eae670.slice/crio-5c4e757a8b89277eb4beac34ad58f0cdcd921664aa8e49d23f90e72414ff5f60 WatchSource:0}: Error finding container 5c4e757a8b89277eb4beac34ad58f0cdcd921664aa8e49d23f90e72414ff5f60: Status 404 returned error can't find the container with id 5c4e757a8b89277eb4beac34ad58f0cdcd921664aa8e49d23f90e72414ff5f60 Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.711774 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.711811 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.711820 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.711834 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.711845 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:15Z","lastTransitionTime":"2025-11-21T19:02:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.749083 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-xxkwp"] Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.749686 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.750734 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-zzdxm"] Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.751576 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.751701 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-kf9jq"] Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.752042 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-kf9jq" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.752351 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-tbszf"] Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.752650 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.754795 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.755149 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.755300 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.757669 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.757711 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.758081 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.758378 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.758586 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.759243 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.759290 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.759468 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.759485 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.759684 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.759716 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.760006 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.760150 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.760445 4701 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-multus"/"kube-root-ca.crt" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.760564 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.760642 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.776741 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.794436 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.812311 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.813727 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.813767 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.813969 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.813988 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.814000 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:15Z","lastTransitionTime":"2025-11-21T19:02:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.828017 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.846405 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.865645 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hb64h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c769c5d1-60d9-43e1-b130-4373c7eae670\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rz7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hb64h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.888521 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xxkwp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.902766 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.907351 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e70a068b-c06b-4ffe-8496-6f55c321d614-mcd-auth-proxy-config\") pod \"machine-config-daemon-tbszf\" (UID: \"e70a068b-c06b-4ffe-8496-6f55c321d614\") " pod="openshift-machine-config-operator/machine-config-daemon-tbszf" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.907378 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mlhlf\" (UniqueName: \"kubernetes.io/projected/e70a068b-c06b-4ffe-8496-6f55c321d614-kube-api-access-mlhlf\") pod \"machine-config-daemon-tbszf\" (UID: \"e70a068b-c06b-4ffe-8496-6f55c321d614\") " 
pod="openshift-machine-config-operator/machine-config-daemon-tbszf" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.907396 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-cni-netd\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.907415 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/5ea0e20e-ab13-4b90-b58a-5b4d377c5ead-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-xxkwp\" (UID: \"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\") " pod="openshift-multus/multus-additional-cni-plugins-xxkwp" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.907431 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-run-openvswitch\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.907447 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-log-socket\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.907466 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-cni-bin\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.907483 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-run-netns\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.907499 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-var-lib-openvswitch\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.907515 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.907569 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"env-overrides\" (UniqueName: \"kubernetes.io/configmap/cd6417be-62d7-4b6a-9711-a89211dca42e-env-overrides\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.907624 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/5ea0e20e-ab13-4b90-b58a-5b4d377c5ead-tuning-conf-dir\") pod \"multus-additional-cni-plugins-xxkwp\" (UID: \"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\") " pod="openshift-multus/multus-additional-cni-plugins-xxkwp" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.907674 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-multus-socket-dir-parent\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.907693 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/2eababf7-b5d3-4479-9ad5-f1060898f324-multus-daemon-config\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.907715 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-kubelet\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.907734 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-run-ovn\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.907757 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-hostroot\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.907774 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-slash\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.907794 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/cd6417be-62d7-4b6a-9711-a89211dca42e-ovn-node-metrics-cert\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.907818 4701 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/5ea0e20e-ab13-4b90-b58a-5b4d377c5ead-cnibin\") pod \"multus-additional-cni-plugins-xxkwp\" (UID: \"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\") " pod="openshift-multus/multus-additional-cni-plugins-xxkwp" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.907851 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-system-cni-dir\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.907868 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/5ea0e20e-ab13-4b90-b58a-5b4d377c5ead-system-cni-dir\") pod \"multus-additional-cni-plugins-xxkwp\" (UID: \"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\") " pod="openshift-multus/multus-additional-cni-plugins-xxkwp" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.907883 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kj4cl\" (UniqueName: \"kubernetes.io/projected/5ea0e20e-ab13-4b90-b58a-5b4d377c5ead-kube-api-access-kj4cl\") pod \"multus-additional-cni-plugins-xxkwp\" (UID: \"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\") " pod="openshift-multus/multus-additional-cni-plugins-xxkwp" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.907903 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-os-release\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.907918 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-run-ovn-kubernetes\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.907942 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ccmf4\" (UniqueName: \"kubernetes.io/projected/cd6417be-62d7-4b6a-9711-a89211dca42e-kube-api-access-ccmf4\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.907960 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/5ea0e20e-ab13-4b90-b58a-5b4d377c5ead-os-release\") pod \"multus-additional-cni-plugins-xxkwp\" (UID: \"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\") " pod="openshift-multus/multus-additional-cni-plugins-xxkwp" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.907984 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/2eababf7-b5d3-4479-9ad5-f1060898f324-cni-binary-copy\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") 
" pod="openshift-multus/multus-kf9jq" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.908002 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5hzjj\" (UniqueName: \"kubernetes.io/projected/2eababf7-b5d3-4479-9ad5-f1060898f324-kube-api-access-5hzjj\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.908017 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-systemd-units\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.908032 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-etc-kubernetes\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.908049 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/e70a068b-c06b-4ffe-8496-6f55c321d614-rootfs\") pod \"machine-config-daemon-tbszf\" (UID: \"e70a068b-c06b-4ffe-8496-6f55c321d614\") " pod="openshift-machine-config-operator/machine-config-daemon-tbszf" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.908064 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-node-log\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.908082 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-cnibin\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.908097 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-host-var-lib-cni-bin\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.908140 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-host-run-multus-certs\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.908160 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-multus-conf-dir\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " 
pod="openshift-multus/multus-kf9jq" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.908181 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-run-systemd\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.908228 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/cd6417be-62d7-4b6a-9711-a89211dca42e-ovnkube-config\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.908245 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-host-run-k8s-cni-cncf-io\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.908266 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/cd6417be-62d7-4b6a-9711-a89211dca42e-ovnkube-script-lib\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.908290 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-multus-cni-dir\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.908306 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-host-var-lib-kubelet\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.908321 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-etc-openvswitch\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.908336 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-host-run-netns\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.908351 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-host-var-lib-cni-multus\") pod \"multus-kf9jq\" (UID: 
\"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.908364 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/e70a068b-c06b-4ffe-8496-6f55c321d614-proxy-tls\") pod \"machine-config-daemon-tbszf\" (UID: \"e70a068b-c06b-4ffe-8496-6f55c321d614\") " pod="openshift-machine-config-operator/machine-config-daemon-tbszf" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.908379 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/5ea0e20e-ab13-4b90-b58a-5b4d377c5ead-cni-binary-copy\") pod \"multus-additional-cni-plugins-xxkwp\" (UID: \"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\") " pod="openshift-multus/multus-additional-cni-plugins-xxkwp" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.916390 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45e6862c-1f97-44f6-bae7-1f3bcb8a6671\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-
dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ff2d56a6e954338aa40e9ccacf6ea72f2dd1e66810cca1441497352ae855378\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.916725 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.916755 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.916765 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.916781 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.916791 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:15Z","lastTransitionTime":"2025-11-21T19:02:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.929247 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.942312 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.950381 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.950439 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:02:15 crc kubenswrapper[4701]: E1121 19:02:15.950713 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 19:02:15 crc kubenswrapper[4701]: E1121 19:02:15.950711 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.956081 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45e6862c-1f97-44f6-bae7-1f3bcb8a6671\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ff2d56a6e954338aa40e9ccacf6ea72f2dd1e66810cca1441497352ae855378\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/st
atic-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.968792 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:15 crc kubenswrapper[4701]: I1121 19:02:15.992176 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd6417be-62d7-4b6a-9711-a89211dca42e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zzdxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:15Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.009460 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-run-ovn-kubernetes\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.009502 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/5ea0e20e-ab13-4b90-b58a-5b4d377c5ead-system-cni-dir\") pod \"multus-additional-cni-plugins-xxkwp\" (UID: \"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\") " pod="openshift-multus/multus-additional-cni-plugins-xxkwp" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.009521 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kj4cl\" (UniqueName: \"kubernetes.io/projected/5ea0e20e-ab13-4b90-b58a-5b4d377c5ead-kube-api-access-kj4cl\") pod \"multus-additional-cni-plugins-xxkwp\" (UID: \"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\") " pod="openshift-multus/multus-additional-cni-plugins-xxkwp" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.009541 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-os-release\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.009559 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-systemd-units\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.009579 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ccmf4\" (UniqueName: \"kubernetes.io/projected/cd6417be-62d7-4b6a-9711-a89211dca42e-kube-api-access-ccmf4\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.009599 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/5ea0e20e-ab13-4b90-b58a-5b4d377c5ead-os-release\") pod \"multus-additional-cni-plugins-xxkwp\" (UID: \"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\") " pod="openshift-multus/multus-additional-cni-plugins-xxkwp" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.009623 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/2eababf7-b5d3-4479-9ad5-f1060898f324-cni-binary-copy\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.009619 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-run-ovn-kubernetes\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.009711 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/5ea0e20e-ab13-4b90-b58a-5b4d377c5ead-system-cni-dir\") pod \"multus-additional-cni-plugins-xxkwp\" (UID: \"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\") " 
pod="openshift-multus/multus-additional-cni-plugins-xxkwp" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.009639 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5hzjj\" (UniqueName: \"kubernetes.io/projected/2eababf7-b5d3-4479-9ad5-f1060898f324-kube-api-access-5hzjj\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.009752 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-host-run-multus-certs\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.009966 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-os-release\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.009996 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-systemd-units\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010110 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-etc-kubernetes\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010141 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/5ea0e20e-ab13-4b90-b58a-5b4d377c5ead-os-release\") pod \"multus-additional-cni-plugins-xxkwp\" (UID: \"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\") " pod="openshift-multus/multus-additional-cni-plugins-xxkwp" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010147 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/e70a068b-c06b-4ffe-8496-6f55c321d614-rootfs\") pod \"machine-config-daemon-tbszf\" (UID: \"e70a068b-c06b-4ffe-8496-6f55c321d614\") " pod="openshift-machine-config-operator/machine-config-daemon-tbszf" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010162 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-host-run-multus-certs\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010214 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-node-log\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010232 4701 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-cnibin\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010285 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-host-var-lib-cni-bin\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010307 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-multus-conf-dir\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010337 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-run-systemd\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010353 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/cd6417be-62d7-4b6a-9711-a89211dca42e-ovnkube-config\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010373 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-host-run-k8s-cni-cncf-io\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010389 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-etc-openvswitch\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010408 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/cd6417be-62d7-4b6a-9711-a89211dca42e-ovnkube-script-lib\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010433 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-multus-cni-dir\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010452 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-host-var-lib-kubelet\") 
pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010467 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/5ea0e20e-ab13-4b90-b58a-5b4d377c5ead-cni-binary-copy\") pod \"multus-additional-cni-plugins-xxkwp\" (UID: \"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\") " pod="openshift-multus/multus-additional-cni-plugins-xxkwp" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010484 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-host-run-netns\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010499 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-host-var-lib-cni-multus\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010517 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/e70a068b-c06b-4ffe-8496-6f55c321d614-proxy-tls\") pod \"machine-config-daemon-tbszf\" (UID: \"e70a068b-c06b-4ffe-8496-6f55c321d614\") " pod="openshift-machine-config-operator/machine-config-daemon-tbszf" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010534 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/5ea0e20e-ab13-4b90-b58a-5b4d377c5ead-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-xxkwp\" (UID: \"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\") " pod="openshift-multus/multus-additional-cni-plugins-xxkwp" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010554 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e70a068b-c06b-4ffe-8496-6f55c321d614-mcd-auth-proxy-config\") pod \"machine-config-daemon-tbszf\" (UID: \"e70a068b-c06b-4ffe-8496-6f55c321d614\") " pod="openshift-machine-config-operator/machine-config-daemon-tbszf" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010572 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mlhlf\" (UniqueName: \"kubernetes.io/projected/e70a068b-c06b-4ffe-8496-6f55c321d614-kube-api-access-mlhlf\") pod \"machine-config-daemon-tbszf\" (UID: \"e70a068b-c06b-4ffe-8496-6f55c321d614\") " pod="openshift-machine-config-operator/machine-config-daemon-tbszf" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010590 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-cni-netd\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010608 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-var-lib-openvswitch\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010624 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-run-openvswitch\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010640 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-log-socket\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010657 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-cni-bin\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010675 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-run-netns\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010692 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010708 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/cd6417be-62d7-4b6a-9711-a89211dca42e-env-overrides\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010723 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-kubelet\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010738 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/5ea0e20e-ab13-4b90-b58a-5b4d377c5ead-tuning-conf-dir\") pod \"multus-additional-cni-plugins-xxkwp\" (UID: \"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\") " pod="openshift-multus/multus-additional-cni-plugins-xxkwp" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010761 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: 
\"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-multus-socket-dir-parent\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010776 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/2eababf7-b5d3-4479-9ad5-f1060898f324-multus-daemon-config\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010790 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-run-ovn\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010809 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-hostroot\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010827 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-slash\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010846 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/cd6417be-62d7-4b6a-9711-a89211dca42e-ovn-node-metrics-cert\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010871 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/5ea0e20e-ab13-4b90-b58a-5b4d377c5ead-cnibin\") pod \"multus-additional-cni-plugins-xxkwp\" (UID: \"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\") " pod="openshift-multus/multus-additional-cni-plugins-xxkwp" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.010899 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-system-cni-dir\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.011043 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-system-cni-dir\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.011069 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-cnibin\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:16 crc 
kubenswrapper[4701]: I1121 19:02:16.011094 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/e70a068b-c06b-4ffe-8496-6f55c321d614-rootfs\") pod \"machine-config-daemon-tbszf\" (UID: \"e70a068b-c06b-4ffe-8496-6f55c321d614\") " pod="openshift-machine-config-operator/machine-config-daemon-tbszf" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.011639 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/2eababf7-b5d3-4479-9ad5-f1060898f324-cni-binary-copy\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.011678 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-cni-netd\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.011890 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-var-lib-openvswitch\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.011899 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-kubelet\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.011957 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-run-openvswitch\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.011961 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-run-netns\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.011976 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-etc-kubernetes\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.011973 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-run-systemd\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.012002 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: 
\"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-cni-bin\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.012036 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-multus-conf-dir\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.012071 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.012102 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-node-log\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.012105 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-multus-cni-dir\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.012129 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-host-run-netns\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.012153 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-hostroot\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.012154 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-host-var-lib-cni-multus\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.012192 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-host-var-lib-cni-bin\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.012307 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-log-socket\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc 
kubenswrapper[4701]: I1121 19:02:16.012433 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/cd6417be-62d7-4b6a-9711-a89211dca42e-env-overrides\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.012484 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-etc-openvswitch\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.012511 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-host-var-lib-kubelet\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.012522 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-multus-socket-dir-parent\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.012542 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-slash\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.012547 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-run-ovn\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.012573 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/5ea0e20e-ab13-4b90-b58a-5b4d377c5ead-cnibin\") pod \"multus-additional-cni-plugins-xxkwp\" (UID: \"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\") " pod="openshift-multus/multus-additional-cni-plugins-xxkwp" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.012451 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/2eababf7-b5d3-4479-9ad5-f1060898f324-host-run-k8s-cni-cncf-io\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.012833 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/cd6417be-62d7-4b6a-9711-a89211dca42e-ovnkube-script-lib\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.012956 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: 
\"kubernetes.io/configmap/5ea0e20e-ab13-4b90-b58a-5b4d377c5ead-cni-binary-copy\") pod \"multus-additional-cni-plugins-xxkwp\" (UID: \"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\") " pod="openshift-multus/multus-additional-cni-plugins-xxkwp" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.013137 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/5ea0e20e-ab13-4b90-b58a-5b4d377c5ead-tuning-conf-dir\") pod \"multus-additional-cni-plugins-xxkwp\" (UID: \"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\") " pod="openshift-multus/multus-additional-cni-plugins-xxkwp" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.013246 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e70a068b-c06b-4ffe-8496-6f55c321d614-mcd-auth-proxy-config\") pod \"machine-config-daemon-tbszf\" (UID: \"e70a068b-c06b-4ffe-8496-6f55c321d614\") " pod="openshift-machine-config-operator/machine-config-daemon-tbszf" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.013307 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/2eababf7-b5d3-4479-9ad5-f1060898f324-multus-daemon-config\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.013437 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/cd6417be-62d7-4b6a-9711-a89211dca42e-ovnkube-config\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.013734 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/5ea0e20e-ab13-4b90-b58a-5b4d377c5ead-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-xxkwp\" (UID: \"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\") " pod="openshift-multus/multus-additional-cni-plugins-xxkwp" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.017367 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/cd6417be-62d7-4b6a-9711-a89211dca42e-ovn-node-metrics-cert\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.017426 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/e70a068b-c06b-4ffe-8496-6f55c321d614-proxy-tls\") pod \"machine-config-daemon-tbszf\" (UID: \"e70a068b-c06b-4ffe-8496-6f55c321d614\") " pod="openshift-machine-config-operator/machine-config-daemon-tbszf" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.017526 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e70a068b-c06b-4ffe-8496-6f55c321d614\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-tbszf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:16Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.020170 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.020227 4701 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.020238 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.020255 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.020266 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:16Z","lastTransitionTime":"2025-11-21T19:02:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.030131 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ccmf4\" (UniqueName: \"kubernetes.io/projected/cd6417be-62d7-4b6a-9711-a89211dca42e-kube-api-access-ccmf4\") pod \"ovnkube-node-zzdxm\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.030893 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kj4cl\" (UniqueName: \"kubernetes.io/projected/5ea0e20e-ab13-4b90-b58a-5b4d377c5ead-kube-api-access-kj4cl\") pod \"multus-additional-cni-plugins-xxkwp\" (UID: \"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\") " pod="openshift-multus/multus-additional-cni-plugins-xxkwp" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.033710 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5hzjj\" (UniqueName: \"kubernetes.io/projected/2eababf7-b5d3-4479-9ad5-f1060898f324-kube-api-access-5hzjj\") pod \"multus-kf9jq\" (UID: \"2eababf7-b5d3-4479-9ad5-f1060898f324\") " pod="openshift-multus/multus-kf9jq" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.034700 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mlhlf\" (UniqueName: \"kubernetes.io/projected/e70a068b-c06b-4ffe-8496-6f55c321d614-kube-api-access-mlhlf\") pod \"machine-config-daemon-tbszf\" (UID: \"e70a068b-c06b-4ffe-8496-6f55c321d614\") " pod="openshift-machine-config-operator/machine-config-daemon-tbszf" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.040381 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:16Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.063219 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"
/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xxkwp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:16Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.073548 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.074486 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.079861 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kf9jq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2eababf7-b5d3-4479-9ad5-f1060898f324\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-5hzjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kf9jq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:16Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:16 crc kubenswrapper[4701]: W1121 19:02:16.087167 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5ea0e20e_ab13_4b90_b58a_5b4d377c5ead.slice/crio-92b459f78c732bb5b030770e89cfd86260f709c2aa237a1782563e8f32d9f833 WatchSource:0}: Error finding container 92b459f78c732bb5b030770e89cfd86260f709c2aa237a1782563e8f32d9f833: Status 404 returned error can't find the container with id 92b459f78c732bb5b030770e89cfd86260f709c2aa237a1782563e8f32d9f833 Nov 21 19:02:16 crc kubenswrapper[4701]: W1121 19:02:16.089586 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcd6417be_62d7_4b6a_9711_a89211dca42e.slice/crio-476db94b54f79b4d4270d667eb8cb8eee3a7b807e90a12a98387e0eeb9e2310a WatchSource:0}: Error finding container 476db94b54f79b4d4270d667eb8cb8eee3a7b807e90a12a98387e0eeb9e2310a: Status 404 returned error can't find the container with id 476db94b54f79b4d4270d667eb8cb8eee3a7b807e90a12a98387e0eeb9e2310a Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.092849 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:16Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.100602 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-kf9jq" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.104884 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:16Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.106950 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" Nov 21 19:02:16 crc kubenswrapper[4701]: W1121 19:02:16.116486 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2eababf7_b5d3_4479_9ad5_f1060898f324.slice/crio-7d97eb54ff98c5f1d4793919535ee46bf0c2bbc511d8fe4df3e1daa3efb215da WatchSource:0}: Error finding container 7d97eb54ff98c5f1d4793919535ee46bf0c2bbc511d8fe4df3e1daa3efb215da: Status 404 returned error can't find the container with id 7d97eb54ff98c5f1d4793919535ee46bf0c2bbc511d8fe4df3e1daa3efb215da Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.123348 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.123383 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.123394 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.123414 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.123428 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:16Z","lastTransitionTime":"2025-11-21T19:02:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.124325 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hb64h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c769c5d1-60d9-43e1-b130-4373c7eae670\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rz7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hb64h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:16Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.133360 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-hb64h" event={"ID":"c769c5d1-60d9-43e1-b130-4373c7eae670","Type":"ContainerStarted","Data":"07ec62beec4f7c4a8cc1504df02a84665027e8c508d74022202e41f529ef9d6f"} Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.134236 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-hb64h" event={"ID":"c769c5d1-60d9-43e1-b130-4373c7eae670","Type":"ContainerStarted","Data":"5c4e757a8b89277eb4beac34ad58f0cdcd921664aa8e49d23f90e72414ff5f60"} Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.134266 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-kf9jq" 
event={"ID":"2eababf7-b5d3-4479-9ad5-f1060898f324","Type":"ContainerStarted","Data":"7d97eb54ff98c5f1d4793919535ee46bf0c2bbc511d8fe4df3e1daa3efb215da"} Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.135068 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" event={"ID":"cd6417be-62d7-4b6a-9711-a89211dca42e","Type":"ContainerStarted","Data":"476db94b54f79b4d4270d667eb8cb8eee3a7b807e90a12a98387e0eeb9e2310a"} Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.136672 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" event={"ID":"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead","Type":"ContainerStarted","Data":"92b459f78c732bb5b030770e89cfd86260f709c2aa237a1782563e8f32d9f833"} Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.140853 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kub
ernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:16Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.153607 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:16Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.169466 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 
19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:16Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.195350 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45e6862c-1f97-44f6-bae7-1f3bcb8a6671\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ff2d56a6e954338aa40e9ccacf6ea72f2dd1e66810cca1441497352ae855378\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"
name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:16Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.236960 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.236989 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.236997 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.237010 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.237036 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:16Z","lastTransitionTime":"2025-11-21T19:02:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.259789 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd6417be-62d7-4b6a-9711-a89211dca42e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"
mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.12
6.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zzdxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:16Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.276499 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e70a068b-c06b-4ffe-8496-6f55c321d614\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-tbszf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:16Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.290805 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:16Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.302275 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:16Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.314781 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kf9jq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2eababf7-b5d3-4479-9ad5-f1060898f324\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5hzjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kf9jq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:16Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.326950 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:16Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.339446 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:16Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.339644 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.339697 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.339714 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.339739 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.339754 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:16Z","lastTransitionTime":"2025-11-21T19:02:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.354981 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hb64h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c769c5d1-60d9-43e1-b130-4373c7eae670\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07ec62beec4f7c4a8cc1504df02a84665027e8c508d74022202e41f529ef9d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rz7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hb64h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:16Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.377850 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa9308
9f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xxkwp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:16Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.405702 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:16Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.424315 
4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:16Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.442531 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.442570 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.442580 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.442595 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.442605 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:16Z","lastTransitionTime":"2025-11-21T19:02:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns 
error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.545739 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.545780 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.545788 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.545802 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.545812 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:16Z","lastTransitionTime":"2025-11-21T19:02:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.648470 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.648535 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.648556 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.648604 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.648623 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:16Z","lastTransitionTime":"2025-11-21T19:02:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.751072 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.751144 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.751162 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.751188 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.751228 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:16Z","lastTransitionTime":"2025-11-21T19:02:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.855991 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.856424 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.856453 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.856484 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.856507 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:16Z","lastTransitionTime":"2025-11-21T19:02:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.950175 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:02:16 crc kubenswrapper[4701]: E1121 19:02:16.950385 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.959404 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.959459 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.959475 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.959495 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:16 crc kubenswrapper[4701]: I1121 19:02:16.959511 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:16Z","lastTransitionTime":"2025-11-21T19:02:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.062038 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.062077 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.062086 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.062098 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.062107 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:17Z","lastTransitionTime":"2025-11-21T19:02:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.142369 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-kf9jq" event={"ID":"2eababf7-b5d3-4479-9ad5-f1060898f324","Type":"ContainerStarted","Data":"afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456"} Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.144303 4701 generic.go:334] "Generic (PLEG): container finished" podID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerID="3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9" exitCode=0 Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.144348 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" event={"ID":"cd6417be-62d7-4b6a-9711-a89211dca42e","Type":"ContainerDied","Data":"3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9"} Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.146523 4701 generic.go:334] "Generic (PLEG): container finished" podID="5ea0e20e-ab13-4b90-b58a-5b4d377c5ead" containerID="afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92" exitCode=0 Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.146642 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" event={"ID":"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead","Type":"ContainerDied","Data":"afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92"} Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.157535 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerStarted","Data":"2b6a8b6f55f76ffe5d5f9997137285e639ae17fda481325198a8561d79393480"} Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.157615 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerStarted","Data":"d2e81ee034439f66ef1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322"} Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.157637 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerStarted","Data":"7064836abbf287d91956b262d54225253f4a70171fa54417a02d5825a5995151"} Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.164298 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.164336 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.164351 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.164369 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.164383 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:17Z","lastTransitionTime":"2025-11-21T19:02:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.169425 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:17Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.188286 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:17Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.201556 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hb64h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c769c5d1-60d9-43e1-b130-4373c7eae670\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07ec62beec4f7c4a8cc1504df02a84665027e8c508d74022202e41f529ef9d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rz7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hb64h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:17Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.225413 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plu
gin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xxkwp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-21T19:02:17Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.242132 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kf9jq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2eababf7-b5d3-4479-9ad5-f1060898f324\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5hzjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\
\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kf9jq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:17Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.258147 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:17Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.271817 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.271905 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.271920 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.271944 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.271982 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:17Z","lastTransitionTime":"2025-11-21T19:02:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.281310 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:17Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.311062 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 
19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:17Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.328233 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45e6862c-1f97-44f6-bae7-1f3bcb8a6671\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ff2d56a6e954338aa40e9ccacf6ea72f2dd1e66810cca1441497352ae855378\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"
name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:17Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.344822 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e70a068b-c06b-4ffe-8496-6f55c321d614\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-tbszf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:17Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.356646 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:17Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.367686 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:17Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.374321 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.374398 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.374415 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.374454 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.374468 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:17Z","lastTransitionTime":"2025-11-21T19:02:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.386003 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd6417be-62d7-4b6a-9711-a89211dca42e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"
mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.12
6.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zzdxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:17Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.401547 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 
19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:17Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.414884 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45e6862c-1f97-44f6-bae7-1f3bcb8a6671\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ff2d56a6e954338aa40e9ccacf6ea72f2dd1e66810cca1441497352ae855378\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"
name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:17Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.427952 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:17Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.456760 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd6417be-62d7-4b6a-9711-a89211dca42e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zzdxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:17Z 
is after 2025-08-24T17:21:41Z" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.470321 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e70a068b-c06b-4ffe-8496-6f55c321d614\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b6a8b6f55f76ffe5d5f9997137285e639ae17fda481325198a8561d79393480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e81ee034439f66ef1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-tbszf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": 
tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:17Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.477067 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.477241 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.477392 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.477504 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.477583 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:17Z","lastTransitionTime":"2025-11-21T19:02:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.486751 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:17Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.504560 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"
recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/h
ost/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xxkwp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:17Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.520738 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kf9jq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2eababf7-b5d3-4479-9ad5-f1060898f324\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-di
r\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5hzjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kf9jq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:17Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.538447 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:17Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.543522 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.543747 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.543851 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.543946 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.544083 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:17 crc kubenswrapper[4701]: E1121 19:02:17.544357 4701 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 19:02:17 crc kubenswrapper[4701]: E1121 19:02:17.544455 4701 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not 
registered Nov 21 19:02:17 crc kubenswrapper[4701]: E1121 19:02:17.544532 4701 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 19:02:17 crc kubenswrapper[4701]: E1121 19:02:17.544640 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-21 19:02:25.54462232 +0000 UTC m=+36.329762367 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 19:02:17 crc kubenswrapper[4701]: E1121 19:02:17.544959 4701 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 19:02:17 crc kubenswrapper[4701]: E1121 19:02:17.545082 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 19:02:25.545066547 +0000 UTC m=+36.330206584 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 19:02:17 crc kubenswrapper[4701]: E1121 19:02:17.545262 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:02:25.54525027 +0000 UTC m=+36.330390307 (durationBeforeRetry 8s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:02:17 crc kubenswrapper[4701]: E1121 19:02:17.545436 4701 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 19:02:17 crc kubenswrapper[4701]: E1121 19:02:17.545471 4701 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 19:02:17 crc kubenswrapper[4701]: E1121 19:02:17.545482 4701 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 19:02:17 crc kubenswrapper[4701]: E1121 19:02:17.545524 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-21 19:02:25.545509234 +0000 UTC m=+36.330649251 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 19:02:17 crc kubenswrapper[4701]: E1121 19:02:17.545575 4701 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 19:02:17 crc kubenswrapper[4701]: E1121 19:02:17.545599 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 19:02:25.545592625 +0000 UTC m=+36.330732792 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.556323 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:17Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.573573 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hb64h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c769c5d1-60d9-43e1-b130-4373c7eae670\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07ec62beec4f7c4a8cc1504df02a84665027e8c508d74022202e41f529ef9d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rz7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hb64h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:17Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.580782 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.580938 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.581030 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.581120 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.581195 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:17Z","lastTransitionTime":"2025-11-21T19:02:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.591336 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:17Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.606294 4701 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:17Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.683515 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.683821 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.684003 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.684141 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.684308 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:17Z","lastTransitionTime":"2025-11-21T19:02:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.789811 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.789866 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.789881 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.789910 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.789930 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:17Z","lastTransitionTime":"2025-11-21T19:02:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.894384 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.894965 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.894980 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.895007 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.895032 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:17Z","lastTransitionTime":"2025-11-21T19:02:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.950897 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:02:17 crc kubenswrapper[4701]: E1121 19:02:17.951091 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.952019 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:17 crc kubenswrapper[4701]: E1121 19:02:17.952234 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.978824 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-n6w8v"] Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.979457 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-n6w8v" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.983250 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.983502 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.983519 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.986139 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.999031 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.999102 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.999122 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.999149 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:17 crc kubenswrapper[4701]: I1121 19:02:17.999166 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:17Z","lastTransitionTime":"2025-11-21T19:02:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.002736 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:18Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.021501 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"45e6862c-1f97-44f6-bae7-1f3bcb8a6671\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ff2d56a6e954338aa40e9ccacf6ea72f2dd1e66810cca1441497352ae855378\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:18Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.039048 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:18Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.049701 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/57baab98-95f2-4dff-94ff-a296ffe8a418-host\") pod \"node-ca-n6w8v\" (UID: \"57baab98-95f2-4dff-94ff-a296ffe8a418\") " pod="openshift-image-registry/node-ca-n6w8v" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.050013 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-clkbh\" (UniqueName: \"kubernetes.io/projected/57baab98-95f2-4dff-94ff-a296ffe8a418-kube-api-access-clkbh\") pod \"node-ca-n6w8v\" (UID: \"57baab98-95f2-4dff-94ff-a296ffe8a418\") " pod="openshift-image-registry/node-ca-n6w8v" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.050112 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/57baab98-95f2-4dff-94ff-a296ffe8a418-serviceca\") pod \"node-ca-n6w8v\" (UID: \"57baab98-95f2-4dff-94ff-a296ffe8a418\") " pod="openshift-image-registry/node-ca-n6w8v" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.054394 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:18Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.102618 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.102684 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.102706 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.102735 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.102756 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:18Z","lastTransitionTime":"2025-11-21T19:02:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.102614 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd6417be-62d7-4b6a-9711-a89211dca42e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4
aab45573abfbf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zzdxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:18Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.116478 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e70a068b-c06b-4ffe-8496-6f55c321d614\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b6a8b6f55f76ffe5d5f9997137285e639ae17fda481325198a8561d79393480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\
\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e81ee034439f66ef1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-tbszf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:18Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.150770 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/57baab98-95f2-4dff-94ff-a296ffe8a418-serviceca\") pod \"node-ca-n6w8v\" (UID: \"57baab98-95f2-4dff-94ff-a296ffe8a418\") " pod="openshift-image-registry/node-ca-n6w8v" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.150830 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/57baab98-95f2-4dff-94ff-a296ffe8a418-host\") pod \"node-ca-n6w8v\" (UID: \"57baab98-95f2-4dff-94ff-a296ffe8a418\") " pod="openshift-image-registry/node-ca-n6w8v" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.150853 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-clkbh\" (UniqueName: \"kubernetes.io/projected/57baab98-95f2-4dff-94ff-a296ffe8a418-kube-api-access-clkbh\") pod \"node-ca-n6w8v\" (UID: \"57baab98-95f2-4dff-94ff-a296ffe8a418\") " pod="openshift-image-registry/node-ca-n6w8v" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.151436 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-n6w8v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"57baab98-95f2-4dff-94ff-a296ffe8a418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clkbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-n6w8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:18Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.151702 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/57baab98-95f2-4dff-94ff-a296ffe8a418-host\") pod \"node-ca-n6w8v\" (UID: \"57baab98-95f2-4dff-94ff-a296ffe8a418\") " pod="openshift-image-registry/node-ca-n6w8v" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.152949 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/57baab98-95f2-4dff-94ff-a296ffe8a418-serviceca\") pod \"node-ca-n6w8v\" (UID: \"57baab98-95f2-4dff-94ff-a296ffe8a418\") " pod="openshift-image-registry/node-ca-n6w8v" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.176768 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:18Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.194047 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-clkbh\" (UniqueName: \"kubernetes.io/projected/57baab98-95f2-4dff-94ff-a296ffe8a418-kube-api-access-clkbh\") pod \"node-ca-n6w8v\" (UID: \"57baab98-95f2-4dff-94ff-a296ffe8a418\") " pod="openshift-image-registry/node-ca-n6w8v" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.203531 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:18Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.203964 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" event={"ID":"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead","Type":"ContainerStarted","Data":"cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53"} Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.208563 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.208606 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.208617 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.208639 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.208653 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:18Z","lastTransitionTime":"2025-11-21T19:02:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.212054 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" event={"ID":"cd6417be-62d7-4b6a-9711-a89211dca42e","Type":"ContainerStarted","Data":"c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685"} Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.212112 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" event={"ID":"cd6417be-62d7-4b6a-9711-a89211dca42e","Type":"ContainerStarted","Data":"c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c"} Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.212127 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" event={"ID":"cd6417be-62d7-4b6a-9711-a89211dca42e","Type":"ContainerStarted","Data":"409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948"} Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.212139 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" event={"ID":"cd6417be-62d7-4b6a-9711-a89211dca42e","Type":"ContainerStarted","Data":"09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba"} Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.226831 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hb64h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c769c5d1-60d9-43e1-b130-4373c7eae670\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07ec62beec4f7c4a8cc1504df02a84665027e8c508d74022202e41f529ef9d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rz7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\
\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hb64h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:18Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.248699 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xxkwp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:18Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:18 crc 
kubenswrapper[4701]: I1121 19:02:18.271478 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kf9jq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2eababf7-b5d3-4479-9ad5-f1060898f324\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5hzjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\
"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kf9jq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:18Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.287493 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:18Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.299374 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-n6w8v" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.300130 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:18Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.312791 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.312829 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.312839 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.312856 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.312866 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:18Z","lastTransitionTime":"2025-11-21T19:02:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.316322 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:18Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.334024 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:18Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:18 crc kubenswrapper[4701]: W1121 19:02:18.349249 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod57baab98_95f2_4dff_94ff_a296ffe8a418.slice/crio-5e15ab54ef598b183215bb1186261f77d0b901bea5e666de05a73cdbd26c806e WatchSource:0}: Error finding container 5e15ab54ef598b183215bb1186261f77d0b901bea5e666de05a73cdbd26c806e: Status 404 returned error can't find the container with id 5e15ab54ef598b183215bb1186261f77d0b901bea5e666de05a73cdbd26c806e Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.359423 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:18Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.376065 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"45e6862c-1f97-44f6-bae7-1f3bcb8a6671\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ff2d56a6e954338aa40e9ccacf6ea72f2dd1e66810cca1441497352ae855378\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:18Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.394299 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:18Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.415687 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.415754 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.415773 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.415800 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.415819 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:18Z","lastTransitionTime":"2025-11-21T19:02:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.416906 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:18Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.440337 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd6417be-62d7-4b6a-9711-a89211dca42e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zzdxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:18Z 
is after 2025-08-24T17:21:41Z" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.452145 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e70a068b-c06b-4ffe-8496-6f55c321d614\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b6a8b6f55f76ffe5d5f9997137285e639ae17fda481325198a8561d79393480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e81ee034439f66ef1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-tbszf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": 
tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:18Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.463811 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-n6w8v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"57baab98-95f2-4dff-94ff-a296ffe8a418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clkbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-n6w8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:18Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.478654 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:18Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.492725 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:18Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.508159 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hb64h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c769c5d1-60d9-43e1-b130-4373c7eae670\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07ec62beec4f7c4a8cc1504df02a84665027e8c508d74022202e41f529ef9d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rz7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hb64h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:18Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.518778 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.518866 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.518891 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.518919 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.518942 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:18Z","lastTransitionTime":"2025-11-21T19:02:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.529321 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\
\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xxkwp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:18Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.547407 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kf9jq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2eababf7-b5d3-4479-9ad5-f1060898f324\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5hzjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kf9jq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:18Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.622001 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.622048 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.622060 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.622077 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.622088 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:18Z","lastTransitionTime":"2025-11-21T19:02:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.725354 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.725390 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.725398 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.725412 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.725424 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:18Z","lastTransitionTime":"2025-11-21T19:02:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.829088 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.829158 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.829176 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.829238 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.829257 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:18Z","lastTransitionTime":"2025-11-21T19:02:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.933006 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.933116 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.933138 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.933177 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.933240 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:18Z","lastTransitionTime":"2025-11-21T19:02:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:18 crc kubenswrapper[4701]: I1121 19:02:18.950170 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:02:18 crc kubenswrapper[4701]: E1121 19:02:18.950474 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.036913 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.036978 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.036993 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.037015 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.037030 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:19Z","lastTransitionTime":"2025-11-21T19:02:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.140401 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.140500 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.140547 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.140587 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.140615 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:19Z","lastTransitionTime":"2025-11-21T19:02:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.224117 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" event={"ID":"cd6417be-62d7-4b6a-9711-a89211dca42e","Type":"ContainerStarted","Data":"6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f"} Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.224277 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" event={"ID":"cd6417be-62d7-4b6a-9711-a89211dca42e","Type":"ContainerStarted","Data":"fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531"} Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.227147 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-n6w8v" event={"ID":"57baab98-95f2-4dff-94ff-a296ffe8a418","Type":"ContainerStarted","Data":"fec1e2980b38c9ac8c023bc67c0c4a17c7a7e47d88a78ffa6e72562410d0131e"} Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.227259 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-n6w8v" event={"ID":"57baab98-95f2-4dff-94ff-a296ffe8a418","Type":"ContainerStarted","Data":"5e15ab54ef598b183215bb1186261f77d0b901bea5e666de05a73cdbd26c806e"} Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.232171 4701 generic.go:334] "Generic (PLEG): container finished" podID="5ea0e20e-ab13-4b90-b58a-5b4d377c5ead" containerID="cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53" exitCode=0 Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.232285 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" event={"ID":"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead","Type":"ContainerDied","Data":"cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53"} Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.244003 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.244075 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.244097 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.244123 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.244141 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:19Z","lastTransitionTime":"2025-11-21T19:02:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.254598 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:19Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.274617 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:19Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.291707 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hb64h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c769c5d1-60d9-43e1-b130-4373c7eae670\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07ec62beec4f7c4a8cc1504df02a84665027e8c508d74022202e41f529ef9d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rz7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hb64h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:19Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.313993 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\
\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\
\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xxkwp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:19Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.333823 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kf9jq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2eababf7-b5d3-4479-9ad5-f1060898f324\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\
\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5hzjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kf9jq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:19Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.348843 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.348892 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.348903 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.348926 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.348944 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:19Z","lastTransitionTime":"2025-11-21T19:02:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.354685 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:19Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.379701 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:19Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.396542 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 
19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:19Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.416827 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45e6862c-1f97-44f6-bae7-1f3bcb8a6671\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ff2d56a6e954338aa40e9ccacf6ea72f2dd1e66810cca1441497352ae855378\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"
name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:19Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.434669 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:19Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.453741 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.453841 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.453868 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.453861 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:19Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.453918 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.454660 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:19Z","lastTransitionTime":"2025-11-21T19:02:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.488845 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd6417be-62d7-4b6a-9711-a89211dca42e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4
aab45573abfbf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zzdxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:19Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.507559 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e70a068b-c06b-4ffe-8496-6f55c321d614\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b6a8b6f55f76ffe5d5f9997137285e639ae17fda481325198a8561d79393480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\
\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e81ee034439f66ef1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-tbszf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:19Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.522997 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-n6w8v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"57baab98-95f2-4dff-94ff-a296ffe8a418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fec1e2980b38c9ac8c023bc67c0c4a17c7a7e47d88a78ffa6e72562410d0131e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clkbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-n6w8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:19Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.541748 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:19Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.558372 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.558422 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.558432 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.558451 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.558464 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:19Z","lastTransitionTime":"2025-11-21T19:02:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.565019 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:19Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.599485 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd6417be-62d7-4b6a-9711-a89211dca42e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zzdxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:19Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.616259 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e70a068b-c06b-4ffe-8496-6f55c321d614\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b6a8b6f55f76ffe5d5f9997137285e639ae17fda481325198a8561d79393480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e81ee034439f66ef1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\
\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-tbszf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:19Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.633406 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-n6w8v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"57baab98-95f2-4dff-94ff-a296ffe8a418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fec1e2980b38c9ac8c023bc67c0c4a17c7a7e47d88a78ffa6e72562410d0131e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clkbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-n6w8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:19Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.650432 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:19Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.661135 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.661262 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.661281 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.661307 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.661323 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:19Z","lastTransitionTime":"2025-11-21T19:02:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.667497 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:19Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.681902 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hb64h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c769c5d1-60d9-43e1-b130-4373c7eae670\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07ec62beec4f7c4a8cc1504df02a84665027e8c508d74022202e41f529ef9d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rz7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hb64h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:19Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.698124 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":
{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xxkwp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:19Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.717915 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kf9jq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2eababf7-b5d3-4479-9ad5-f1060898f324\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\
\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5hzjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kf9jq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:19Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.740374 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-de
v@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:19Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.758388 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:19Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.764302 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.764357 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.764371 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.764391 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.764406 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:19Z","lastTransitionTime":"2025-11-21T19:02:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.774839 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:19Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.788918 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"45e6862c-1f97-44f6-bae7-1f3bcb8a6671\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ff2d56a6e954338aa40e9ccacf6ea72f2dd1e66810cca1441497352ae855378\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:19Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.867011 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.867060 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.867072 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.867094 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.867105 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:19Z","lastTransitionTime":"2025-11-21T19:02:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.951032 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.951124 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:19 crc kubenswrapper[4701]: E1121 19:02:19.951255 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 19:02:19 crc kubenswrapper[4701]: E1121 19:02:19.951323 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.970661 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.970699 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.970712 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.970729 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.970742 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:19Z","lastTransitionTime":"2025-11-21T19:02:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.972999 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:19Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:19 crc kubenswrapper[4701]: I1121 19:02:19.992941 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:19Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.013546 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"45e6862c-1f97-44f6-bae7-1f3bcb8a6671\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ff2d56a6e954338aa40e9ccacf6ea72f2dd1e66810cca1441497352ae855378\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:20Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.037594 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 
19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:20Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.061613 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:20Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.073275 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.073671 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.074126 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.074170 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.074190 4701 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:20Z","lastTransitionTime":"2025-11-21T19:02:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.091090 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:20Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.124533 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd6417be-62d7-4b6a-9711-a89211dca42e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zzdxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:20Z 
is after 2025-08-24T17:21:41Z" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.146096 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e70a068b-c06b-4ffe-8496-6f55c321d614\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b6a8b6f55f76ffe5d5f9997137285e639ae17fda481325198a8561d79393480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e81ee034439f66ef1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-tbszf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": 
tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:20Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.166370 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-n6w8v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"57baab98-95f2-4dff-94ff-a296ffe8a418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fec1e2980b38c9ac8c023bc67c0c4a17c7a7e47d88a78ffa6e72562410d0131e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clkbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-n6w8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:20Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.178706 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.178801 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.178821 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.178853 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeNotReady" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.178877 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:20Z","lastTransitionTime":"2025-11-21T19:02:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.184338 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hb64h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c769c5d1-60d9-43e1-b130-4373c7eae670\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07ec62beec4f7c4a8cc1504df02a84665027e8c508d74022202e41f529ef9d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rz7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hb64h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:20Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.209267 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"image\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"moun
tPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xxkwp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:20Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.234296 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kf9jq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2eababf7-b5d3-4479-9ad5-f1060898f324\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5hzjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kf9jq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:20Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.240840 4701 generic.go:334] "Generic (PLEG): container finished" podID="5ea0e20e-ab13-4b90-b58a-5b4d377c5ead" containerID="51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d" exitCode=0 Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.240916 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" event={"ID":"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead","Type":"ContainerDied","Data":"51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d"} Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.260988 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:20Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.282607 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:20Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.282726 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.282771 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.282783 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.282804 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.282819 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:20Z","lastTransitionTime":"2025-11-21T19:02:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.305286 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:20Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.322961 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hb64h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c769c5d1-60d9-43e1-b130-4373c7eae670\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07ec62beec4f7c4a8cc1504df02a84665027e8c508d74022202e41f529ef9d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rz7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hb64h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:20Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.351538 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serv
iceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xxkwp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:20Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.376273 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kf9jq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2eababf7-b5d3-4479-9ad5-f1060898f324\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mo
untPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5hzjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kf9jq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:20Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.387436 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.387511 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.387530 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.387560 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.387579 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:20Z","lastTransitionTime":"2025-11-21T19:02:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.396751 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:20Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.419539 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:20Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.439088 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:20Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.456167 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:20Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.490831 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.490879 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.490896 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.490919 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.490936 4701 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:20Z","lastTransitionTime":"2025-11-21T19:02:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.513153 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45e6862c-1f97-44f6-bae7-1f3bcb8a6671\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ff2d56a6e954338aa40e9ccacf6ea72f2dd1e66810cca1441497352ae855378\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:20Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.531827 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:20Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.551805 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:20Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.578424 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd6417be-62d7-4b6a-9711-a89211dca42e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zzdxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:20Z 
is after 2025-08-24T17:21:41Z" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.594331 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.594385 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.594404 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.594429 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.594445 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:20Z","lastTransitionTime":"2025-11-21T19:02:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.596111 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e70a068b-c06b-4ffe-8496-6f55c321d614\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b6a8b6f55f76ffe5d5f9997137285e639ae17fda481325198a8561d79393480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e81ee034439f66ef1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699
a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-tbszf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:20Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.611125 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-n6w8v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"57baab98-95f2-4dff-94ff-a296ffe8a418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fec1e2980b38c9ac8c023bc67c0c4a17c7a7e47d88a78ffa6e72562410d0131e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clkbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"
podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-n6w8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:20Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.697561 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.697644 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.697665 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.697698 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.697722 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:20Z","lastTransitionTime":"2025-11-21T19:02:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.802077 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.802142 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.802160 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.802191 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.802250 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:20Z","lastTransitionTime":"2025-11-21T19:02:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.906422 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.906498 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.906516 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.906546 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.906565 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:20Z","lastTransitionTime":"2025-11-21T19:02:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:20 crc kubenswrapper[4701]: I1121 19:02:20.950547 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:02:20 crc kubenswrapper[4701]: E1121 19:02:20.950748 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.009977 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.010032 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.010048 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.010073 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.010091 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:21Z","lastTransitionTime":"2025-11-21T19:02:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.113670 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.113747 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.113771 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.113804 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.113828 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:21Z","lastTransitionTime":"2025-11-21T19:02:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.217656 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.217853 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.217877 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.217910 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.217930 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:21Z","lastTransitionTime":"2025-11-21T19:02:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.251000 4701 generic.go:334] "Generic (PLEG): container finished" podID="5ea0e20e-ab13-4b90-b58a-5b4d377c5ead" containerID="357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d" exitCode=0 Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.251104 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" event={"ID":"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead","Type":"ContainerDied","Data":"357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d"} Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.261467 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" event={"ID":"cd6417be-62d7-4b6a-9711-a89211dca42e","Type":"ContainerStarted","Data":"86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69"} Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.283091 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mo
untPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:21Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.309922 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:21Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.321454 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.321535 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.321564 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.321598 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.321623 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:21Z","lastTransitionTime":"2025-11-21T19:02:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.335761 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45e6862c-1f97-44f6-bae7-1f3bcb8a6671\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ff2d56a6e954338aa40e9ccacf6ea72f2dd1e66810cca1441497352ae855378\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:21Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.364059 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 
19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:21Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.385805 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:21Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.405401 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:21Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.425415 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.425482 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.425495 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.425518 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.425535 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:21Z","lastTransitionTime":"2025-11-21T19:02:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.436683 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd6417be-62d7-4b6a-9711-a89211dca42e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4
aab45573abfbf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zzdxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:21Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.452628 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e70a068b-c06b-4ffe-8496-6f55c321d614\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b6a8b6f55f76ffe5d5f9997137285e639ae17fda481325198a8561d79393480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\
\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e81ee034439f66ef1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-tbszf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:21Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.464014 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-n6w8v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"57baab98-95f2-4dff-94ff-a296ffe8a418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fec1e2980b38c9ac8c023bc67c0c4a17c7a7e47d88a78ffa6e72562410d0131e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clkbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-n6w8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:21Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.476388 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hb64h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c769c5d1-60d9-43e1-b130-4373c7eae670\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07ec62beec4f7c4a8cc1504df02a84665027e8c508d74022202e41f529ef9d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rz7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hb64h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:21Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.500059 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"w
aiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xxkwp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:21Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.514278 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kf9jq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2eababf7-b5d3-4479-9ad5-f1060898f324\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5hzjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kf9jq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:21Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.528953 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.529006 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.529025 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.529052 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.529070 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:21Z","lastTransitionTime":"2025-11-21T19:02:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.534943 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:21Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.550078 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:21Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.632561 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.632639 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.632658 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.632686 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.632704 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:21Z","lastTransitionTime":"2025-11-21T19:02:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.735635 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.735706 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.735723 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.735750 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.735765 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:21Z","lastTransitionTime":"2025-11-21T19:02:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.839251 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.839338 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.839363 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.839396 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.839420 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:21Z","lastTransitionTime":"2025-11-21T19:02:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.944193 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.944270 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.944283 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.944302 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.944317 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:21Z","lastTransitionTime":"2025-11-21T19:02:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.950604 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:21 crc kubenswrapper[4701]: I1121 19:02:21.950704 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:02:21 crc kubenswrapper[4701]: E1121 19:02:21.950842 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 19:02:21 crc kubenswrapper[4701]: E1121 19:02:21.950932 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.072582 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.072640 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.072654 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.072681 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.072696 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:22Z","lastTransitionTime":"2025-11-21T19:02:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.176842 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.176908 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.176921 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.176954 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.176970 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:22Z","lastTransitionTime":"2025-11-21T19:02:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.271603 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" event={"ID":"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead","Type":"ContainerStarted","Data":"5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f"} Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.279458 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.279520 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.279546 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.279583 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.279610 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:22Z","lastTransitionTime":"2025-11-21T19:02:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.297158 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-d
ev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:22Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.314499 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:22Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.338911 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 
19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:22Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.364303 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45e6862c-1f97-44f6-bae7-1f3bcb8a6671\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ff2d56a6e954338aa40e9ccacf6ea72f2dd1e66810cca1441497352ae855378\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"
name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:22Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.383794 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.383848 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.383865 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.383891 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.383910 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:22Z","lastTransitionTime":"2025-11-21T19:02:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.390578 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:22Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.414959 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:22Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.451298 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd6417be-62d7-4b6a-9711-a89211dca42e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zzdxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:22Z 
is after 2025-08-24T17:21:41Z" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.469663 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e70a068b-c06b-4ffe-8496-6f55c321d614\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b6a8b6f55f76ffe5d5f9997137285e639ae17fda481325198a8561d79393480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e81ee034439f66ef1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-tbszf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": 
tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:22Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.486823 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.486902 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.486926 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.486956 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.486975 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:22Z","lastTransitionTime":"2025-11-21T19:02:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.489476 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-n6w8v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"57baab98-95f2-4dff-94ff-a296ffe8a418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fec1e2980b38c9ac8c023bc67c0c4a17c7a7e47d88a78ffa6e72562410d0131e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clkbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",
\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-n6w8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:22Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.514126 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:22Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.532973 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:22Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.548798 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hb64h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c769c5d1-60d9-43e1-b130-4373c7eae670\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07ec62beec4f7c4a8cc1504df02a84665027e8c508d74022202e41f529ef9d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rz7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hb64h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:22Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.573345 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9
8100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xxkwp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:22Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.591381 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.591445 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.591463 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.591490 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.591508 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:22Z","lastTransitionTime":"2025-11-21T19:02:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.596053 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kf9jq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2eababf7-b5d3-4479-9ad5-f1060898f324\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5hzjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kf9jq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:22Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.694516 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.694571 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.694592 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.694615 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.694632 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:22Z","lastTransitionTime":"2025-11-21T19:02:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.797415 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.797478 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.797499 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.797524 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.797541 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:22Z","lastTransitionTime":"2025-11-21T19:02:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.901295 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.901348 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.901366 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.901390 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.901407 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:22Z","lastTransitionTime":"2025-11-21T19:02:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:22 crc kubenswrapper[4701]: I1121 19:02:22.949964 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:02:22 crc kubenswrapper[4701]: E1121 19:02:22.950121 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.004915 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.004972 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.004990 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.005020 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.005038 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:23Z","lastTransitionTime":"2025-11-21T19:02:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.107706 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.107779 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.107801 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.107832 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.107857 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:23Z","lastTransitionTime":"2025-11-21T19:02:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.211110 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.211150 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.211159 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.211175 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.211185 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:23Z","lastTransitionTime":"2025-11-21T19:02:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.279033 4701 generic.go:334] "Generic (PLEG): container finished" podID="5ea0e20e-ab13-4b90-b58a-5b4d377c5ead" containerID="5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f" exitCode=0 Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.279095 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" event={"ID":"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead","Type":"ContainerDied","Data":"5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f"} Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.288077 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" event={"ID":"cd6417be-62d7-4b6a-9711-a89211dca42e","Type":"ContainerStarted","Data":"549d541aace6f338d9ab51a90e86a43fe7320bcf2fecb69b7b9ab2e06d218fda"} Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.288513 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.304632 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.313383 4701 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.313400 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.313407 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.313421 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.313433 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:23Z","lastTransitionTime":"2025-11-21T19:02:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.324819 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.356643 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd6417be-62d7-4b6a-9711-a89211dca42e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zzdxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z 
is after 2025-08-24T17:21:41Z" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.372612 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e70a068b-c06b-4ffe-8496-6f55c321d614\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b6a8b6f55f76ffe5d5f9997137285e639ae17fda481325198a8561d79393480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e81ee034439f66ef1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-tbszf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": 
tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.384952 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-n6w8v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"57baab98-95f2-4dff-94ff-a296ffe8a418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fec1e2980b38c9ac8c023bc67c0c4a17c7a7e47d88a78ffa6e72562410d0131e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clkbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-n6w8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.405970 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hb64h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c769c5d1-60d9-43e1-b130-4373c7eae670\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07ec62beec4f7c4a8cc1504df02a84665027e8c508d74022202e41f529ef9d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rz7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hb64h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.416157 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.416228 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.416246 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.416268 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.416286 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:23Z","lastTransitionTime":"2025-11-21T19:02:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.423290 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.428852 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountP
ath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\
\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xxkwp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.444567 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kf9jq" err="failed to 
patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2eababf7-b5d3-4479-9ad5-f1060898f324\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5hzjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kf9jq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.461822 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.476517 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.494027 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.512072 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.519273 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.519357 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.519382 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.519413 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.519434 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:23Z","lastTransitionTime":"2025-11-21T19:02:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.534118 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45e6862c-1f97-44f6-bae7-1f3bcb8a6671\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ff2d56a6e954338aa40e9ccacf6ea72f2dd1e66810cca1441497352ae855378\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.559780 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 
19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.576594 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.590816 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.607051 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.621924 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.621991 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.622009 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.622038 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.622058 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:23Z","lastTransitionTime":"2025-11-21T19:02:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.624078 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"45e6862c-1f97-44f6-bae7-1f3bcb8a6671\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ff2d56a6e954338aa40e9ccacf6ea72f2dd1e66810cca1441497352ae855378\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.639705 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.669401 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd6417be-62d7-4b6a-9711-a89211dca42e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://549d541aace6f338d9ab51a90e86a43fe7320bcf
2fecb69b7b9ab2e06d218fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zzdxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.684068 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e70a068b-c06b-4ffe-8496-6f55c321d614\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b6a8b6f55f76ffe5d5f9997137285e639ae17fda481325198a8561d79393480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e81ee034439f66ef1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-tbszf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.699085 4701 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-n6w8v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"57baab98-95f2-4dff-94ff-a296ffe8a418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fec1e2980b38c9ac8c023bc67c0c4a17c7a7e47d88a78ffa6e72562410d0131e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clkbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-n6w8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.715799 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.724809 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.724863 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.724885 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.724915 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.724941 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:23Z","lastTransitionTime":"2025-11-21T19:02:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.737513 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4
cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:20Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xxkwp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.763260 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kf9jq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2eababf7-b5d3-4479-9ad5-f1060898f324\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5hzjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kf9jq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.784979 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.800488 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.800633 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.811830 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hb64h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c769c5d1-60d9-43e1-b130-4373c7eae670\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07ec62beec4f7c4a8cc1504df02a84665027e8c508d74022202e41f529ef9d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rz7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hb64h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.828014 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.828681 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.828740 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.828759 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.828786 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.828806 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:23Z","lastTransitionTime":"2025-11-21T19:02:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.843489 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.855023 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hb64h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c769c5d1-60d9-43e1-b130-4373c7eae670\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07ec62beec4f7c4a8cc1504df02a84665027e8c508d74022202e41f529ef9d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rz7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hb64h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.875461 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\
\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xxkwp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.892946 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kf9jq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2eababf7-b5d3-4479-9ad5-f1060898f324\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5hzjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kf9jq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.909237 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z is after 
2025-08-24T17:21:41Z" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.925427 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.930868 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.930918 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.930934 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.930956 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.930975 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:23Z","lastTransitionTime":"2025-11-21T19:02:23Z","reason":"KubeletNotReady","message":"container runtime network not 
ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.941244 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\
\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.951048 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:23 crc kubenswrapper[4701]: E1121 19:02:23.951247 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.951055 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:02:23 crc kubenswrapper[4701]: E1121 19:02:23.951480 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.960047 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45e6862c-1f97-44f6-bae7-1f3bcb8a6671\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ff2d56a6e954338aa40e9ccacf6ea72f2dd1e66810cca1441497352ae855378\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\
\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.976844 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:23 crc kubenswrapper[4701]: I1121 19:02:23.994427 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:23Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.028151 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd6417be-62d7-4b6a-9711-a89211dca42e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://549d541aace6f338d9ab51a90e86a43fe7320bcf
2fecb69b7b9ab2e06d218fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zzdxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:24Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.033289 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.033342 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.033361 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.033385 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.033402 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:24Z","lastTransitionTime":"2025-11-21T19:02:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.045782 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e70a068b-c06b-4ffe-8496-6f55c321d614\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b6a8b6f55f76ffe5d5f9997137285e639ae17fda481325198a8561d79393480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e81ee034439f66ef1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-tbszf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:24Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.060609 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-n6w8v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"57baab98-95f2-4dff-94ff-a296ffe8a418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fec1e2980b38c9ac8c023bc67c0c4a17c7a7e47d88a78ffa6e72562410d0131e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clkbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-n6w8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:24Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.136843 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.136910 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.136927 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.136953 4701 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.136970 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:24Z","lastTransitionTime":"2025-11-21T19:02:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.240631 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.240984 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.241256 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.241491 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.241686 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:24Z","lastTransitionTime":"2025-11-21T19:02:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.298838 4701 generic.go:334] "Generic (PLEG): container finished" podID="5ea0e20e-ab13-4b90-b58a-5b4d377c5ead" containerID="4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3" exitCode=0 Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.299373 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" event={"ID":"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead","Type":"ContainerDied","Data":"4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3"} Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.299859 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.300030 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.324082 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:24Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.345463 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hb64h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c769c5d1-60d9-43e1-b130-4373c7eae670\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07ec62beec4f7c4a8cc1504df02a84665027e8c508d74022202e41f529ef9d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rz7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hb64h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:24Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.346648 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.346694 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.346712 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.346740 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.346761 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:24Z","lastTransitionTime":"2025-11-21T19:02:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.392912 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.414172 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disab
led\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b994353
6727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xxkwp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:24Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.436167 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kf9jq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2eababf7-b5d3-4479-9ad5-f1060898f324\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5hzjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\
"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kf9jq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:24Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.451620 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.451654 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.451663 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.451675 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.451685 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:24Z","lastTransitionTime":"2025-11-21T19:02:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.455440 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:24Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.473948 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:24Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.489462 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:24Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.514322 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"conta
inerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:24Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.534640 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"45e6862c-1f97-44f6-bae7-1f3bcb8a6671\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ff2d56a6e954338aa40e9ccacf6ea72f2dd1e66810cca1441497352ae855378\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:24Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.557713 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.558084 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.558154 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.558185 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.558241 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:24Z","lastTransitionTime":"2025-11-21T19:02:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.563094 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:24Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.582542 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:24Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.613860 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd6417be-62d7-4b6a-9711-a89211dca42e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://549d541aace6f338d9ab51a90e86a43fe7320bcf
2fecb69b7b9ab2e06d218fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zzdxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:24Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.629727 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e70a068b-c06b-4ffe-8496-6f55c321d614\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b6a8b6f55f76ffe5d5f9997137285e639ae17fda481325198a8561d79393480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e81ee034439f66ef1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-tbszf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:24Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.644528 4701 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-n6w8v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"57baab98-95f2-4dff-94ff-a296ffe8a418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fec1e2980b38c9ac8c023bc67c0c4a17c7a7e47d88a78ffa6e72562410d0131e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clkbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-n6w8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:24Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.662646 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:24Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.662817 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.662874 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.662893 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.662923 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.662943 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:24Z","lastTransitionTime":"2025-11-21T19:02:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.680975 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:24Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.694817 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hb64h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c769c5d1-60d9-43e1-b130-4373c7eae670\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07ec62beec4f7c4a8cc1504df02a84665027e8c508d74022202e41f529ef9d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rz7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hb64h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:24Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.717624 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://597
9bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xxkwp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:24Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.738243 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kf9jq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2eababf7-b5d3-4479-9ad5-f1060898f324\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5hzjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kf9jq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:24Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.760748 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:24Z is after 
2025-08-24T17:21:41Z" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.767255 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.767306 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.767323 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.767345 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.767362 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:24Z","lastTransitionTime":"2025-11-21T19:02:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.783678 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:24Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.803100 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:24Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.822357 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"45e6862c-1f97-44f6-bae7-1f3bcb8a6671\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ff2d56a6e954338aa40e9ccacf6ea72f2dd1e66810cca1441497352ae855378\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:24Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.836388 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e70a068b-c06b-4ffe-8496-6f55c321d614\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b6a8b6f55f76ffe5d5f9997137285e639ae17fda481325198a8561d79393480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e81ee034439f66ef
1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-tbszf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:24Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.854579 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-n6w8v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"57baab98-95f2-4dff-94ff-a296ffe8a418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fec1e2980b38c9ac8c023bc67c0c4a17c7a7e47d88a78ffa6e72562410d0131e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clkbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Di
sabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-n6w8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:24Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.871418 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.871479 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.871493 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.871516 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.871533 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:24Z","lastTransitionTime":"2025-11-21T19:02:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.881861 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:24Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.904231 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:24Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.935398 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd6417be-62d7-4b6a-9711-a89211dca42e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://549d541aace6f338d9ab51a90e86a43fe7320bcf
2fecb69b7b9ab2e06d218fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zzdxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:24Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.950978 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:02:24 crc kubenswrapper[4701]: E1121 19:02:24.951274 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.975174 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.975253 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.975271 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.975295 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:24 crc kubenswrapper[4701]: I1121 19:02:24.975313 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:24Z","lastTransitionTime":"2025-11-21T19:02:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.077587 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.077654 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.077672 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.077701 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.077721 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:25Z","lastTransitionTime":"2025-11-21T19:02:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.181772 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.181862 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.181884 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.181915 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.181939 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:25Z","lastTransitionTime":"2025-11-21T19:02:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.287187 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.287299 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.287318 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.287347 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.287368 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:25Z","lastTransitionTime":"2025-11-21T19:02:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.316358 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" event={"ID":"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead","Type":"ContainerStarted","Data":"ef2e819e1fcd8a87514d3018af92b78391cf38e5bbd149125674b3a09c9ec791"} Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.337042 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\
\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:25Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.365193 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"45e6862c-1f97-44f6-bae7-1f3bcb8a6671\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ff2d56a6e954338aa40e9ccacf6ea72f2dd1e66810cca1441497352ae855378\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:25Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.389590 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:25Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.390886 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.390936 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.390953 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.390977 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.390997 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:25Z","lastTransitionTime":"2025-11-21T19:02:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.412831 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:25Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.478054 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd6417be-62d7-4b6a-9711-a89211dca42e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://549d541aace6f338d9ab51a90e86a43fe7320bcf
2fecb69b7b9ab2e06d218fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zzdxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:25Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.495109 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e70a068b-c06b-4ffe-8496-6f55c321d614\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b6a8b6f55f76ffe5d5f9997137285e639ae17fda481325198a8561d79393480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e81ee034439f66ef1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-tbszf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:25Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.495657 4701 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.495702 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.495717 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.495736 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.495751 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:25Z","lastTransitionTime":"2025-11-21T19:02:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.513683 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-n6w8v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"57baab98-95f2-4dff-94ff-a296ffe8a418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fec1e2980b38c9ac8c023bc67c0c4a17c7a7e47d88a78ffa6e72562410d0131e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clkbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-n6w8v\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:25Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.530770 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:25Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.546456 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hb64h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c769c5d1-60d9-43e1-b130-4373c7eae670\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07ec62beec4f7c4a8cc1504df02a84665027e8c508d74022202e41f529ef9d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rz7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hb64h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:25Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.555963 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.556001 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.556013 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.556042 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.556063 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:25Z","lastTransitionTime":"2025-11-21T19:02:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.563021 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef2e819e1fcd8a87514d3018af92b78391cf38e5bbd149125674b3a09c9ec791\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf687ba9
996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/e
ntrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xxkwp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:25Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.572915 4701 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.573007 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.573038 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.573066 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.573091 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:25 crc kubenswrapper[4701]: E1121 19:02:25.573218 4701 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 19:02:25 crc kubenswrapper[4701]: E1121 19:02:25.573270 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 19:02:41.573255449 +0000 UTC m=+52.358395476 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 19:02:25 crc kubenswrapper[4701]: E1121 19:02:25.573571 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:02:41.573561285 +0000 UTC m=+52.358701312 (durationBeforeRetry 16s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:02:25 crc kubenswrapper[4701]: E1121 19:02:25.573653 4701 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 19:02:25 crc kubenswrapper[4701]: E1121 19:02:25.573671 4701 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 19:02:25 crc kubenswrapper[4701]: E1121 19:02:25.573683 4701 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 19:02:25 crc kubenswrapper[4701]: E1121 19:02:25.573711 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-21 19:02:41.573702278 +0000 UTC m=+52.358842305 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 19:02:25 crc kubenswrapper[4701]: E1121 19:02:25.573753 4701 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 19:02:25 crc kubenswrapper[4701]: E1121 19:02:25.573780 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 19:02:41.5737721 +0000 UTC m=+52.358912127 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 19:02:25 crc kubenswrapper[4701]: E1121 19:02:25.573840 4701 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 19:02:25 crc kubenswrapper[4701]: E1121 19:02:25.573852 4701 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 19:02:25 crc kubenswrapper[4701]: E1121 19:02:25.573861 4701 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 19:02:25 crc kubenswrapper[4701]: E1121 19:02:25.573888 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-21 19:02:41.573879792 +0000 UTC m=+52.359019819 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 19:02:25 crc kubenswrapper[4701]: E1121 19:02:25.574144 4701 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056
b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951
},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3bda9678-f6a5-4de4-acaa-3527a0be80fa\\\",\\\"systemUUID\\\":\\\"5ab738c4-0d34-41bd-a531-77773953d838\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-21T19:02:25Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.577564 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.577594 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.577604 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.577619 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.577629 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:25Z","lastTransitionTime":"2025-11-21T19:02:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.581429 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kf9jq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2eababf7-b5d3-4479-9ad5-f1060898f324\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\
\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5hzjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kf9jq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:25Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:25 crc kubenswrapper[4701]: E1121 19:02:25.592070 4701 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3bda9678-f6a5-4de4-acaa-3527a0be80fa\\\",\\\"systemUUID\\\":\\\"5ab738c4-0d34-41bd-a531-77773953d838\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:25Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.594932 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.594985 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.594994 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.595011 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.595022 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:25Z","lastTransitionTime":"2025-11-21T19:02:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.602242 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:25Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:25 crc kubenswrapper[4701]: E1121 19:02:25.608410 4701 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3bda9678-f6a5-4de4-acaa-3527a0be80fa\\\",\\\"systemUUID\\\":\\\"5ab738c4-0d34-41bd-a531-77773953d838\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:25Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.612756 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.612808 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.612819 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.612837 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.612847 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:25Z","lastTransitionTime":"2025-11-21T19:02:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.620418 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:25Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:25 crc kubenswrapper[4701]: E1121 19:02:25.624838 4701 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3bda9678-f6a5-4de4-acaa-3527a0be80fa\\\",\\\"systemUUID\\\":\\\"5ab738c4-0d34-41bd-a531-77773953d838\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:25Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.628544 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.628581 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.628592 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.628614 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.628628 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:25Z","lastTransitionTime":"2025-11-21T19:02:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.637038 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"o
vnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:25Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:25 crc kubenswrapper[4701]: E1121 19:02:25.642972 4701 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3bda9678-f6a5-4de4-acaa-3527a0be80fa\\\",\\\"systemUUID\\\":\\\"5ab738c4-0d34-41bd-a531-77773953d838\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:25Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:25 crc kubenswrapper[4701]: E1121 19:02:25.643097 4701 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.644668 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.644706 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.644717 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.644738 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.644751 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:25Z","lastTransitionTime":"2025-11-21T19:02:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.747623 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.747698 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.747708 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.747726 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.747737 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:25Z","lastTransitionTime":"2025-11-21T19:02:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.850503 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.850639 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.850674 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.850708 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.850727 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:25Z","lastTransitionTime":"2025-11-21T19:02:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.950718 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.950733 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:02:25 crc kubenswrapper[4701]: E1121 19:02:25.950933 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 19:02:25 crc kubenswrapper[4701]: E1121 19:02:25.951068 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.952599 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.952658 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.952675 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.952700 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:25 crc kubenswrapper[4701]: I1121 19:02:25.952718 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:25Z","lastTransitionTime":"2025-11-21T19:02:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.056424 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.056491 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.056508 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.056531 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.056548 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:26Z","lastTransitionTime":"2025-11-21T19:02:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.159608 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.159646 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.159659 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.159679 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.159692 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:26Z","lastTransitionTime":"2025-11-21T19:02:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.261993 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.262034 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.262045 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.262061 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.262073 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:26Z","lastTransitionTime":"2025-11-21T19:02:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.322942 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zzdxm_cd6417be-62d7-4b6a-9711-a89211dca42e/ovnkube-controller/0.log" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.326370 4701 generic.go:334] "Generic (PLEG): container finished" podID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerID="549d541aace6f338d9ab51a90e86a43fe7320bcf2fecb69b7b9ab2e06d218fda" exitCode=1 Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.326496 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" event={"ID":"cd6417be-62d7-4b6a-9711-a89211dca42e","Type":"ContainerDied","Data":"549d541aace6f338d9ab51a90e86a43fe7320bcf2fecb69b7b9ab2e06d218fda"} Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.327118 4701 scope.go:117] "RemoveContainer" containerID="549d541aace6f338d9ab51a90e86a43fe7320bcf2fecb69b7b9ab2e06d218fda" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.348812 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:26Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.366134 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 
19:02:26.366463 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.366602 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.366838 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.366984 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:26Z","lastTransitionTime":"2025-11-21T19:02:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.375124 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hb64h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c769c5d1-60d9-43e1-b130-4373c7eae670\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07ec62beec4f7c4a8cc1504df02a84665027e8c508d74022202e41f529ef9d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rz7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hb64h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: 
current time 2025-11-21T19:02:26Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.406402 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef2e819e1fcd8a87514d3018af92b78391cf38e5bbd149125674b3a09c9ec791\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2
038d6233fc418b9dd8d95536c53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\
\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xxkwp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:26Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.433847 4701 status_manager.go:875] "Failed to update 
status for pod" pod="openshift-multus/multus-kf9jq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2eababf7-b5d3-4479-9ad5-f1060898f324\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5hzjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-kf9jq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:26Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.456274 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:26Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.472183 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.472290 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.472309 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.472338 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.472361 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:26Z","lastTransitionTime":"2025-11-21T19:02:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.481733 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:26Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.508904 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:26Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.534559 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\
\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:26Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.561130 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45e6862c-1f97-44f6-bae7-1f3bcb8a6671\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"moun
tPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ff2d56a6e954338aa40e9ccacf6ea72f2dd1e66810cca1441497352ae855378\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:26Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.576027 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.576063 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.576081 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.576108 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.576126 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:26Z","lastTransitionTime":"2025-11-21T19:02:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network 
plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.582869 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:26Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.604779 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:26Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.634311 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd6417be-62d7-4b6a-9711-a89211dca42e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://549d541aace6f338d9ab51a90e86a43fe7320bcf
2fecb69b7b9ab2e06d218fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://549d541aace6f338d9ab51a90e86a43fe7320bcf2fecb69b7b9ab2e06d218fda\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T19:02:26Z\\\",\\\"message\\\":\\\"l\\\\nI1121 19:02:26.139350 6014 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1121 19:02:26.139386 6014 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1121 19:02:26.139418 6014 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1121 19:02:26.139423 6014 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1121 19:02:26.139437 6014 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1121 19:02:26.139442 6014 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1121 19:02:26.139485 6014 handler.go:208] Removed *v1.Node event handler 2\\\\nI1121 19:02:26.139494 6014 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1121 19:02:26.139502 6014 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1121 19:02:26.139510 6014 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1121 19:02:26.139517 6014 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1121 19:02:26.139524 6014 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1121 19:02:26.139547 6014 handler.go:208] Removed *v1.Node event handler 7\\\\nI1121 19:02:26.139569 6014 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1121 19:02:26.139595 6014 factory.go:656] Stopping watch factory\\\\nI1121 19:02:26.139609 6014 ovnkube.go:599] Stopped ovnkube\\\\nI1121 
1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d209
9482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zzdxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:26Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.656107 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e70a068b-c06b-4ffe-8496-6f55c321d614\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b6a8b6f55f76ffe5d5f9997137285e639ae17fda481325198a8561d79393480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e81ee034439f66ef1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-tbszf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:26Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.675290 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-n6w8v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"57baab98-95f2-4dff-94ff-a296ffe8a418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fec1e2980b38c9ac8c023bc67c0c4a17c7a7e47d88a78ffa6e72562410d0131e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-
access-clkbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-n6w8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:26Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.680524 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.680571 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.680588 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.680613 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.680633 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:26Z","lastTransitionTime":"2025-11-21T19:02:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.784113 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.784191 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.784223 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.784238 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.784251 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:26Z","lastTransitionTime":"2025-11-21T19:02:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.886230 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.886257 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.886265 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.886277 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.886286 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:26Z","lastTransitionTime":"2025-11-21T19:02:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.950362 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:02:26 crc kubenswrapper[4701]: E1121 19:02:26.950489 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.988253 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.988285 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.988293 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.988310 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:26 crc kubenswrapper[4701]: I1121 19:02:26.988319 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:26Z","lastTransitionTime":"2025-11-21T19:02:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.091082 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.091142 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.091154 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.091170 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.091182 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:27Z","lastTransitionTime":"2025-11-21T19:02:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.194732 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.194771 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.194782 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.194798 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.194810 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:27Z","lastTransitionTime":"2025-11-21T19:02:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.301899 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.301983 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.301996 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.302017 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.302031 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:27Z","lastTransitionTime":"2025-11-21T19:02:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.333086 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zzdxm_cd6417be-62d7-4b6a-9711-a89211dca42e/ovnkube-controller/0.log" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.336599 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" event={"ID":"cd6417be-62d7-4b6a-9711-a89211dca42e","Type":"ContainerStarted","Data":"ae34837e00965e7a23b95ae521b1ef20762594ea5d8fb890db85e076abfb474c"} Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.337184 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.365317 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45e6862c-1f97-44f6-bae7-1f3bcb8a6671\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ff2d56a6e954338aa40
e9ccacf6ea72f2dd1e66810cca1441497352ae855378\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:27Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.405035 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.405079 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.405088 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.405107 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.405121 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:27Z","lastTransitionTime":"2025-11-21T19:02:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.405362 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:27Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.426808 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:27Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.445495 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:27Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.469613 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd6417be-62d7-4b6a-9711-a89211dca42e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae34837e00965e7a23b95ae521b1ef20762594ea
5d8fb890db85e076abfb474c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://549d541aace6f338d9ab51a90e86a43fe7320bcf2fecb69b7b9ab2e06d218fda\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T19:02:26Z\\\",\\\"message\\\":\\\"l\\\\nI1121 19:02:26.139350 6014 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1121 19:02:26.139386 6014 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1121 19:02:26.139418 6014 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1121 19:02:26.139423 6014 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1121 19:02:26.139437 6014 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1121 19:02:26.139442 6014 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1121 19:02:26.139485 6014 handler.go:208] Removed *v1.Node event handler 2\\\\nI1121 19:02:26.139494 6014 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1121 19:02:26.139502 6014 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1121 19:02:26.139510 6014 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1121 19:02:26.139517 6014 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1121 19:02:26.139524 6014 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1121 19:02:26.139547 6014 handler.go:208] Removed *v1.Node event handler 7\\\\nI1121 19:02:26.139569 6014 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1121 19:02:26.139595 6014 factory.go:656] Stopping watch factory\\\\nI1121 19:02:26.139609 6014 ovnkube.go:599] Stopped ovnkube\\\\nI1121 
1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:22Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"cont
ainerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zzdxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:27Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.484088 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e70a068b-c06b-4ffe-8496-6f55c321d614\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b6a8b6f55f76ffe5d5f9997137285e639ae17fda481325198a8561d79393480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e81ee034439f66ef1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-tbszf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:27Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.495721 4701 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-n6w8v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"57baab98-95f2-4dff-94ff-a296ffe8a418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fec1e2980b38c9ac8c023bc67c0c4a17c7a7e47d88a78ffa6e72562410d0131e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clkbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-n6w8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:27Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.506473 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hb64h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c769c5d1-60d9-43e1-b130-4373c7eae670\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07ec62beec4f7c4a8cc1504df02a84665027e8c508d74022202e41f529ef9d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rz7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hb64h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:27Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.512609 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.512737 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.512752 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.512796 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.512816 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:27Z","lastTransitionTime":"2025-11-21T19:02:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.529138 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef2e819e1fcd8a87514d3018af92b78391cf38e5bbd149125674b3a09c9ec791\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf687ba9
996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/e
ntrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xxkwp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:27Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.543488 4701 
status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kf9jq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2eababf7-b5d3-4479-9ad5-f1060898f324\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5hzjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\
\"}}\" for pod \"openshift-multus\"/\"multus-kf9jq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:27Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.558754 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:27Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.573761 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:27Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.595925 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:27Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.616337 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.616398 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.616415 4701 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.616444 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.616467 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:27Z","lastTransitionTime":"2025-11-21T19:02:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.620833 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:27Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.720017 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.720103 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.720130 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.720165 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.720187 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:27Z","lastTransitionTime":"2025-11-21T19:02:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.822932 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.822995 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.823013 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.823039 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.823058 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:27Z","lastTransitionTime":"2025-11-21T19:02:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.925962 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.926032 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.926052 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.926077 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.926097 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:27Z","lastTransitionTime":"2025-11-21T19:02:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.950447 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:27 crc kubenswrapper[4701]: I1121 19:02:27.950520 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:02:27 crc kubenswrapper[4701]: E1121 19:02:27.950623 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 19:02:27 crc kubenswrapper[4701]: E1121 19:02:27.950832 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.030031 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.030103 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.030121 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.030146 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.030168 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:28Z","lastTransitionTime":"2025-11-21T19:02:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.132828 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.133167 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.133371 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.133522 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.133645 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:28Z","lastTransitionTime":"2025-11-21T19:02:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.236258 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.236312 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.236324 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.236342 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.236354 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:28Z","lastTransitionTime":"2025-11-21T19:02:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.338983 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.339062 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.339086 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.339117 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.339141 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:28Z","lastTransitionTime":"2025-11-21T19:02:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.342010 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zzdxm_cd6417be-62d7-4b6a-9711-a89211dca42e/ovnkube-controller/1.log" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.343070 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zzdxm_cd6417be-62d7-4b6a-9711-a89211dca42e/ovnkube-controller/0.log" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.346947 4701 generic.go:334] "Generic (PLEG): container finished" podID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerID="ae34837e00965e7a23b95ae521b1ef20762594ea5d8fb890db85e076abfb474c" exitCode=1 Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.347000 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" event={"ID":"cd6417be-62d7-4b6a-9711-a89211dca42e","Type":"ContainerDied","Data":"ae34837e00965e7a23b95ae521b1ef20762594ea5d8fb890db85e076abfb474c"} Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.347077 4701 scope.go:117] "RemoveContainer" containerID="549d541aace6f338d9ab51a90e86a43fe7320bcf2fecb69b7b9ab2e06d218fda" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.348330 4701 scope.go:117] "RemoveContainer" containerID="ae34837e00965e7a23b95ae521b1ef20762594ea5d8fb890db85e076abfb474c" Nov 21 19:02:28 crc kubenswrapper[4701]: E1121 19:02:28.348734 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-zzdxm_openshift-ovn-kubernetes(cd6417be-62d7-4b6a-9711-a89211dca42e)\"" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.378302 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd6417be-62d7-4b6a-9711-a89211dca42e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae34837e00965e7a23b95ae521b1ef20762594ea
5d8fb890db85e076abfb474c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://549d541aace6f338d9ab51a90e86a43fe7320bcf2fecb69b7b9ab2e06d218fda\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T19:02:26Z\\\",\\\"message\\\":\\\"l\\\\nI1121 19:02:26.139350 6014 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1121 19:02:26.139386 6014 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1121 19:02:26.139418 6014 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1121 19:02:26.139423 6014 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1121 19:02:26.139437 6014 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1121 19:02:26.139442 6014 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1121 19:02:26.139485 6014 handler.go:208] Removed *v1.Node event handler 2\\\\nI1121 19:02:26.139494 6014 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1121 19:02:26.139502 6014 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1121 19:02:26.139510 6014 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1121 19:02:26.139517 6014 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1121 19:02:26.139524 6014 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1121 19:02:26.139547 6014 handler.go:208] Removed *v1.Node event handler 7\\\\nI1121 19:02:26.139569 6014 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1121 19:02:26.139595 6014 factory.go:656] Stopping watch factory\\\\nI1121 19:02:26.139609 6014 ovnkube.go:599] Stopped ovnkube\\\\nI1121 1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:22Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae34837e00965e7a23b95ae521b1ef20762594ea5d8fb890db85e076abfb474c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T19:02:27Z\\\",\\\"message\\\":\\\"milyPolicy:*SingleStack,ClusterIPs:[10.217.5.34],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nF1121 19:02:27.438930 6181 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:27Z is after 2025-08-24T17:21:41Z]\\\\nI1121 19:02:27.438940 6181 
lb_config.go:1031] Cluster endpoints for openshift-ingress-canary/ingress-canary for network=default are: map[]\\\\nI1121 19:02:27.438818 6181 obj_retry.go:434] periodicallyRetryResources: Re\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\
"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zzdxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:28Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.399581 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e70a068b-c06b-4ffe-8496-6f55c321d614\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b6a8b6f55f76ffe5d5f9997137285e639ae17fda481325198a8561d79393480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e81ee034439f66ef1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-tbszf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:28Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.414169 4701 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2qmlx"] Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.415386 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2qmlx" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.417625 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.418658 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.418718 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-n6w8v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"57baab98-95f2-4dff-94ff-a296ffe8a418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fec1e2980b38c9ac8c023bc67c0c4a17c7a7e47d88a78ffa6e72562410d0131e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clkbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-n6w8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:28Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.439456 4701 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:28Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.442106 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.442181 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.442239 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.442279 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.442303 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:28Z","lastTransitionTime":"2025-11-21T19:02:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.460475 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:28Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.481471 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kf9jq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2eababf7-b5d3-4479-9ad5-f1060898f324\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5hzjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kf9jq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:28Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.503547 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:28Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.505967 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/1a640e57-40c2-4e96-829d-c2ace468c63c-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-2qmlx\" (UID: \"1a640e57-40c2-4e96-829d-c2ace468c63c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2qmlx" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.506033 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gkqmt\" (UniqueName: \"kubernetes.io/projected/1a640e57-40c2-4e96-829d-c2ace468c63c-kube-api-access-gkqmt\") pod \"ovnkube-control-plane-749d76644c-2qmlx\" (UID: \"1a640e57-40c2-4e96-829d-c2ace468c63c\") " 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2qmlx" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.506103 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/1a640e57-40c2-4e96-829d-c2ace468c63c-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-2qmlx\" (UID: \"1a640e57-40c2-4e96-829d-c2ace468c63c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2qmlx" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.506169 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/1a640e57-40c2-4e96-829d-c2ace468c63c-env-overrides\") pod \"ovnkube-control-plane-749d76644c-2qmlx\" (UID: \"1a640e57-40c2-4e96-829d-c2ace468c63c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2qmlx" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.521918 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:28Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.535601 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hb64h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c769c5d1-60d9-43e1-b130-4373c7eae670\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07ec62beec4f7c4a8cc1504df02a84665027e8c508d74022202e41f529ef9d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rz7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hb64h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:28Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.544439 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.544513 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.544543 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.544575 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.544598 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:28Z","lastTransitionTime":"2025-11-21T19:02:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.560412 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef2e819e1fcd8a87514d3018af92b78391cf38e5bbd149125674b3a09c9ec791\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf687ba9
996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/e
ntrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xxkwp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:28Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.578134 4701 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:28Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.599161 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:28Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.607621 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/1a640e57-40c2-4e96-829d-c2ace468c63c-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-2qmlx\" (UID: \"1a640e57-40c2-4e96-829d-c2ace468c63c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2qmlx" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.607713 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/1a640e57-40c2-4e96-829d-c2ace468c63c-env-overrides\") pod \"ovnkube-control-plane-749d76644c-2qmlx\" (UID: \"1a640e57-40c2-4e96-829d-c2ace468c63c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2qmlx" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.607747 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/1a640e57-40c2-4e96-829d-c2ace468c63c-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-2qmlx\" (UID: \"1a640e57-40c2-4e96-829d-c2ace468c63c\") " 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2qmlx" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.607769 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gkqmt\" (UniqueName: \"kubernetes.io/projected/1a640e57-40c2-4e96-829d-c2ace468c63c-kube-api-access-gkqmt\") pod \"ovnkube-control-plane-749d76644c-2qmlx\" (UID: \"1a640e57-40c2-4e96-829d-c2ace468c63c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2qmlx" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.608950 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/1a640e57-40c2-4e96-829d-c2ace468c63c-env-overrides\") pod \"ovnkube-control-plane-749d76644c-2qmlx\" (UID: \"1a640e57-40c2-4e96-829d-c2ace468c63c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2qmlx" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.609348 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/1a640e57-40c2-4e96-829d-c2ace468c63c-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-2qmlx\" (UID: \"1a640e57-40c2-4e96-829d-c2ace468c63c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2qmlx" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.618559 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/1a640e57-40c2-4e96-829d-c2ace468c63c-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-2qmlx\" (UID: \"1a640e57-40c2-4e96-829d-c2ace468c63c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2qmlx" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.628924 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\
\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:28Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.634769 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gkqmt\" (UniqueName: \"kubernetes.io/projected/1a640e57-40c2-4e96-829d-c2ace468c63c-kube-api-access-gkqmt\") pod \"ovnkube-control-plane-749d76644c-2qmlx\" (UID: \"1a640e57-40c2-4e96-829d-c2ace468c63c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2qmlx" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.647492 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.647556 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.647579 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.647608 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.647629 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:28Z","lastTransitionTime":"2025-11-21T19:02:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.652539 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45e6862c-1f97-44f6-bae7-1f3bcb8a6671\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ff2d56a6e954338aa40e9ccacf6ea72f2dd1e66810cca1441497352ae855378\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:28Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.671274 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\
\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:28Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.688364 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45e6862c-1f97-44f6-bae7-1f3bcb8a6671\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"moun
tPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ff2d56a6e954338aa40e9ccacf6ea72f2dd1e66810cca1441497352ae855378\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:28Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.701372 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2qmlx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1a640e57-40c2-4e96-829d-c2ace468c63c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gkqmt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gkqmt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2qmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:28Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.724857 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd6417be-62d7-4b6a-9711-a89211dca42e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae34837e00965e7a23b95ae521b1ef20762594ea5d8fb890db85e076abfb474c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://549d541aace6f338d9ab51a90e86a43fe7320bcf2fecb69b7b9ab2e06d218fda\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T19:02:26Z\\\",\\\"message\\\":\\\"l\\\\nI1121 19:02:26.139350 6014 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1121 19:02:26.139386 6014 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1121 19:02:26.139418 6014 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1121 19:02:26.139423 6014 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1121 19:02:26.139437 6014 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1121 19:02:26.139442 6014 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1121 19:02:26.139485 6014 handler.go:208] Removed *v1.Node event handler 2\\\\nI1121 19:02:26.139494 6014 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1121 19:02:26.139502 6014 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1121 19:02:26.139510 6014 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1121 19:02:26.139517 6014 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1121 19:02:26.139524 6014 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1121 19:02:26.139547 6014 handler.go:208] Removed *v1.Node event handler 7\\\\nI1121 19:02:26.139569 6014 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1121 19:02:26.139595 6014 factory.go:656] Stopping watch factory\\\\nI1121 19:02:26.139609 6014 ovnkube.go:599] Stopped ovnkube\\\\nI1121 1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:22Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae34837e00965e7a23b95ae521b1ef20762594ea5d8fb890db85e076abfb474c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T19:02:27Z\\\",\\\"message\\\":\\\"milyPolicy:*SingleStack,ClusterIPs:[10.217.5.34],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nF1121 19:02:27.438930 6181 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call 
webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:27Z is after 2025-08-24T17:21:41Z]\\\\nI1121 19:02:27.438940 6181 lb_config.go:1031] Cluster endpoints for openshift-ingress-canary/ingress-canary for network=default are: map[]\\\\nI1121 19:02:27.438818 6181 obj_retry.go:434] periodicallyRetryResources: Re\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/servi
ceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zzdxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:28Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.735414 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2qmlx" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.748377 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e70a068b-c06b-4ffe-8496-6f55c321d614\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b6a8b6f55f76ffe5d5f9997137285e639ae17fda481325198a8561d79393480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e81ee034439f66ef1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-tbszf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:28Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.750724 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.750796 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.750815 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.750840 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.750858 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:28Z","lastTransitionTime":"2025-11-21T19:02:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.762601 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-n6w8v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"57baab98-95f2-4dff-94ff-a296ffe8a418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fec1e2980b38c9ac8c023bc67c0c4a17c7a7e47d88a78ffa6e72562410d0131e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clkbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\
"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-n6w8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:28Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:28 crc kubenswrapper[4701]: W1121 19:02:28.762684 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1a640e57_40c2_4e96_829d_c2ace468c63c.slice/crio-41c3c3d4f700ab1c51d3599dd1c2ff14baa0ae3396b9fb8154d2c9e6d85fa5c6 WatchSource:0}: Error finding container 41c3c3d4f700ab1c51d3599dd1c2ff14baa0ae3396b9fb8154d2c9e6d85fa5c6: Status 404 returned error can't find the container with id 41c3c3d4f700ab1c51d3599dd1c2ff14baa0ae3396b9fb8154d2c9e6d85fa5c6 Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.784516 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:28Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.804279 4701 status_manager.go:875] "Failed to update 
status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:28Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.823803 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kf9jq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2eababf7-b5d3-4479-9ad5-f1060898f324\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5hzjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kf9jq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:28Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.842470 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:28Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.853468 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.853529 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.853552 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.853582 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.853605 4701 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:28Z","lastTransitionTime":"2025-11-21T19:02:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.861642 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:28Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.875728 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hb64h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c769c5d1-60d9-43e1-b130-4373c7eae670\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07ec62beec4f7c4a8cc1504df02a84665027e8c508d74022202e41f529ef9d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rz7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hb64h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:28Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.903747 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef2e819e1fcd8a87514d3018af92b78391cf38e5bbd149125674b3a09c9ec791\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xxkwp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:28Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.919702 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:28Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.937632 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:28Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.950158 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:02:28 crc kubenswrapper[4701]: E1121 19:02:28.950333 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.955569 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.955608 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.955622 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.955641 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:28 crc kubenswrapper[4701]: I1121 19:02:28.955657 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:28Z","lastTransitionTime":"2025-11-21T19:02:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.058634 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.058732 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.058761 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.058800 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.058823 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:29Z","lastTransitionTime":"2025-11-21T19:02:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.161145 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.161173 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.161181 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.161193 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.161258 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:29Z","lastTransitionTime":"2025-11-21T19:02:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.263830 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.263887 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.263903 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.263926 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.263945 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:29Z","lastTransitionTime":"2025-11-21T19:02:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.353046 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zzdxm_cd6417be-62d7-4b6a-9711-a89211dca42e/ovnkube-controller/1.log" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.357051 4701 scope.go:117] "RemoveContainer" containerID="ae34837e00965e7a23b95ae521b1ef20762594ea5d8fb890db85e076abfb474c" Nov 21 19:02:29 crc kubenswrapper[4701]: E1121 19:02:29.357281 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-zzdxm_openshift-ovn-kubernetes(cd6417be-62d7-4b6a-9711-a89211dca42e)\"" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.358834 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2qmlx" event={"ID":"1a640e57-40c2-4e96-829d-c2ace468c63c","Type":"ContainerStarted","Data":"4f295d8d57e61a4a96a2973f461a7bafa92b43432b65abe88e0edddff5be8db6"} Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.358866 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2qmlx" event={"ID":"1a640e57-40c2-4e96-829d-c2ace468c63c","Type":"ContainerStarted","Data":"296a1518b4535368c4d631d1a420e04ef8ce67c3595ad77c9f53089da1c82a49"} Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.358880 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2qmlx" event={"ID":"1a640e57-40c2-4e96-829d-c2ace468c63c","Type":"ContainerStarted","Data":"41c3c3d4f700ab1c51d3599dd1c2ff14baa0ae3396b9fb8154d2c9e6d85fa5c6"} Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.365609 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.365639 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.365646 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.365663 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.365671 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:29Z","lastTransitionTime":"2025-11-21T19:02:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.381951 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:29Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.397727 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:29Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.415287 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\
\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:29Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.436692 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45e6862c-1f97-44f6-bae7-1f3bcb8a6671\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"moun
tPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ff2d56a6e954338aa40e9ccacf6ea72f2dd1e66810cca1441497352ae855378\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:29Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.448797 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2qmlx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1a640e57-40c2-4e96-829d-c2ace468c63c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gkqmt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gkqmt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2qmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:29Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.459353 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-n6w8v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"57baab98-95f2-4dff-94ff-a296ffe8a418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fec1e2980b38c9ac8c023bc67c0c4a17c7a7e47d88a78ffa6e72562410d0131e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clkbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-n6w8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:29Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.468563 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.468595 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.468605 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.468619 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.468630 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:29Z","lastTransitionTime":"2025-11-21T19:02:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.479634 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:29Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.494530 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:29Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.516111 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd6417be-62d7-4b6a-9711-a89211dca42e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae34837e00965e7a23b95ae521b1ef20762594ea
5d8fb890db85e076abfb474c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae34837e00965e7a23b95ae521b1ef20762594ea5d8fb890db85e076abfb474c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T19:02:27Z\\\",\\\"message\\\":\\\"milyPolicy:*SingleStack,ClusterIPs:[10.217.5.34],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nF1121 19:02:27.438930 6181 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:27Z is after 2025-08-24T17:21:41Z]\\\\nI1121 19:02:27.438940 6181 lb_config.go:1031] Cluster endpoints for openshift-ingress-canary/ingress-canary for network=default are: map[]\\\\nI1121 19:02:27.438818 6181 obj_retry.go:434] periodicallyRetryResources: Re\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:26Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-zzdxm_openshift-ovn-kubernetes(cd6417be-62d7-4b6a-9711-a89211dca42e)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zzdxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:29Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.530228 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e70a068b-c06b-4ffe-8496-6f55c321d614\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b6a8b6f55f76ffe5d5f9997137285e639ae17fda481325198a8561d79393480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",
\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e81ee034439f66ef1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-tbszf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:29Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.545600 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:29Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.558407 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:29Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.569021 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hb64h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c769c5d1-60d9-43e1-b130-4373c7eae670\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07ec62beec4f7c4a8cc1504df02a84665027e8c508d74022202e41f529ef9d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rz7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hb64h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:29Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.575185 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.575314 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.575333 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.575359 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.575377 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:29Z","lastTransitionTime":"2025-11-21T19:02:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.597004 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef2e819e1fcd8a87514d3018af92b78391cf38e5bbd149125674b3a09c9ec791\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf687ba9
996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/e
ntrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xxkwp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:29Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.611340 4701 
status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kf9jq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2eababf7-b5d3-4479-9ad5-f1060898f324\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5hzjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\
\"}}\" for pod \"openshift-multus\"/\"multus-kf9jq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:29Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.630629 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:29Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.647015 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"45e6862c-1f97-44f6-bae7-1f3bcb8a6671\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ff2d56a6e954338aa40e9ccacf6ea72f2dd1e66810cca1441497352ae855378\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:29Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.662290 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2qmlx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1a640e57-40c2-4e96-829d-c2ace468c63c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://296a1518b4535368c4d631d1a420e04ef8ce67c3595ad77c9f53089da1c82a49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gkqmt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f295d8d57e61
a4a96a2973f461a7bafa92b43432b65abe88e0edddff5be8db6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gkqmt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2qmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:29Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.677888 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.677951 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.677968 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.677991 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.678024 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:29Z","lastTransitionTime":"2025-11-21T19:02:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.680351 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:29Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.696933 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:29Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.717051 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd6417be-62d7-4b6a-9711-a89211dca42e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae34837e00965e7a23b95ae521b1ef20762594ea
5d8fb890db85e076abfb474c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae34837e00965e7a23b95ae521b1ef20762594ea5d8fb890db85e076abfb474c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T19:02:27Z\\\",\\\"message\\\":\\\"milyPolicy:*SingleStack,ClusterIPs:[10.217.5.34],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nF1121 19:02:27.438930 6181 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:27Z is after 2025-08-24T17:21:41Z]\\\\nI1121 19:02:27.438940 6181 lb_config.go:1031] Cluster endpoints for openshift-ingress-canary/ingress-canary for network=default are: map[]\\\\nI1121 19:02:27.438818 6181 obj_retry.go:434] periodicallyRetryResources: Re\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:26Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-zzdxm_openshift-ovn-kubernetes(cd6417be-62d7-4b6a-9711-a89211dca42e)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zzdxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:29Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.735436 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e70a068b-c06b-4ffe-8496-6f55c321d614\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b6a8b6f55f76ffe5d5f9997137285e639ae17fda481325198a8561d79393480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",
\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e81ee034439f66ef1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-tbszf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:29Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.749075 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-n6w8v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"57baab98-95f2-4dff-94ff-a296ffe8a418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fec1e2980b38c9ac8c023bc67c0c4a17c7a7e47d88a78ffa6e72562410d0131e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clkbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-n6w8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:29Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.767379 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:29Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.781048 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.781082 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.781091 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.781106 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.781116 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:29Z","lastTransitionTime":"2025-11-21T19:02:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.784276 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hb64h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c769c5d1-60d9-43e1-b130-4373c7eae670\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07ec62beec4f7c4a8cc1504df02a84665027e8c508d74022202e41f529ef9d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rz7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hb64h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:29Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.808438 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef2e819e1fcd8a87514d3018af92b78391cf38e5bbd149125674b3a09c9ec791\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xxkwp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:29Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.829758 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kf9jq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2eababf7-b5d3-4479-9ad5-f1060898f324\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5hzjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kf9jq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:29Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.848062 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:29Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.867363 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:29Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.884598 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.884665 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.884688 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.884719 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.884740 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:29Z","lastTransitionTime":"2025-11-21T19:02:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.888045 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:29Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.951040 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.951107 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:29 crc kubenswrapper[4701]: E1121 19:02:29.951310 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 19:02:29 crc kubenswrapper[4701]: E1121 19:02:29.951497 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.967690 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-q5n7s"] Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.968357 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:02:29 crc kubenswrapper[4701]: E1121 19:02:29.968446 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q5n7s" podUID="73831ccf-a071-4135-b8bf-ee1b9b3c2cd1" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.971725 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:29Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.988114 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.988174 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.988194 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.988252 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.988272 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:29Z","lastTransitionTime":"2025-11-21T19:02:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:29 crc kubenswrapper[4701]: I1121 19:02:29.997330 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:29Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.020354 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\
\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:30Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.045385 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45e6862c-1f97-44f6-bae7-1f3bcb8a6671\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"moun
tPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ff2d56a6e954338aa40e9ccacf6ea72f2dd1e66810cca1441497352ae855378\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:30Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.066743 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2qmlx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1a640e57-40c2-4e96-829d-c2ace468c63c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://296a1518b4535368c4d631d1a420e04ef8ce67c3595ad77c9f53089da1c82a49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gkqmt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f295d8d57e61a4a96a2973f461a7bafa92b43432b65abe88e0edddff5be8db6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gkqmt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2qmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:30Z is after 2025-08-24T17:21:41Z" Nov 21 
19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.091532 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.091611 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.091630 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.092228 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.092316 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:30Z","lastTransitionTime":"2025-11-21T19:02:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.092451 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:30Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 
19:02:30.114518 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:30Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.124431 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/73831ccf-a071-4135-b8bf-ee1b9b3c2cd1-metrics-certs\") pod \"network-metrics-daemon-q5n7s\" (UID: \"73831ccf-a071-4135-b8bf-ee1b9b3c2cd1\") " pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.124540 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n54wn\" (UniqueName: \"kubernetes.io/projected/73831ccf-a071-4135-b8bf-ee1b9b3c2cd1-kube-api-access-n54wn\") pod \"network-metrics-daemon-q5n7s\" (UID: \"73831ccf-a071-4135-b8bf-ee1b9b3c2cd1\") " pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.146454 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd6417be-62d7-4b6a-9711-a89211dca42e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae34837e00965e7a23b95ae521b1ef20762594ea5d8fb890db85e076abfb474c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae34837e00965e7a23b95ae521b1ef20762594ea5d8fb890db85e076abfb474c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T19:02:27Z\\\",\\\"message\\\":\\\"milyPolicy:*SingleStack,ClusterIPs:[10.217.5.34],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nF1121 19:02:27.438930 6181 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:27Z is after 2025-08-24T17:21:41Z]\\\\nI1121 19:02:27.438940 6181 lb_config.go:1031] Cluster endpoints for openshift-ingress-canary/ingress-canary for network=default are: map[]\\\\nI1121 19:02:27.438818 6181 obj_retry.go:434] periodicallyRetryResources: Re\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:26Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-zzdxm_openshift-ovn-kubernetes(cd6417be-62d7-4b6a-9711-a89211dca42e)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zzdxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:30Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.165567 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e70a068b-c06b-4ffe-8496-6f55c321d614\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b6a8b6f55f76ffe5d5f9997137285e639ae17fda481325198a8561d79393480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",
\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e81ee034439f66ef1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-tbszf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:30Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.178371 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-n6w8v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"57baab98-95f2-4dff-94ff-a296ffe8a418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fec1e2980b38c9ac8c023bc67c0c4a17c7a7e47d88a78ffa6e72562410d0131e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clkbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-n6w8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:30Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.193412 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:30Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.194750 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.194810 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.194830 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.194856 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.194877 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:30Z","lastTransitionTime":"2025-11-21T19:02:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.209478 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hb64h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c769c5d1-60d9-43e1-b130-4373c7eae670\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07ec62beec4f7c4a8cc1504df02a84665027e8c508d74022202e41f529ef9d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rz7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hb64h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:30Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.225037 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/73831ccf-a071-4135-b8bf-ee1b9b3c2cd1-metrics-certs\") pod \"network-metrics-daemon-q5n7s\" (UID: \"73831ccf-a071-4135-b8bf-ee1b9b3c2cd1\") " pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.225101 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n54wn\" (UniqueName: \"kubernetes.io/projected/73831ccf-a071-4135-b8bf-ee1b9b3c2cd1-kube-api-access-n54wn\") pod \"network-metrics-daemon-q5n7s\" (UID: \"73831ccf-a071-4135-b8bf-ee1b9b3c2cd1\") " pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:02:30 crc kubenswrapper[4701]: E1121 
19:02:30.225171 4701 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 21 19:02:30 crc kubenswrapper[4701]: E1121 19:02:30.225251 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/73831ccf-a071-4135-b8bf-ee1b9b3c2cd1-metrics-certs podName:73831ccf-a071-4135-b8bf-ee1b9b3c2cd1 nodeName:}" failed. No retries permitted until 2025-11-21 19:02:30.725228957 +0000 UTC m=+41.510368984 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/73831ccf-a071-4135-b8bf-ee1b9b3c2cd1-metrics-certs") pod "network-metrics-daemon-q5n7s" (UID: "73831ccf-a071-4135-b8bf-ee1b9b3c2cd1") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.231060 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef2e819e1fcd8a87514d3018af92b78391cf38e5bbd149125674b3a09c9ec791\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62
774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\
\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\
":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xxkwp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:30Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.245550 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n54wn\" (UniqueName: \"kubernetes.io/projected/73831ccf-a071-4135-b8bf-ee1b9b3c2cd1-kube-api-access-n54wn\") pod \"network-metrics-daemon-q5n7s\" (UID: \"73831ccf-a071-4135-b8bf-ee1b9b3c2cd1\") " pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.249877 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kf9jq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2eababf7-b5d3-4479-9ad5-f1060898f324\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-v
ar-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5hzjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kf9jq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:30Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.266403 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:30Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.283217 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:30Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.299126 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.299161 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.299173 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.299190 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.299233 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:30Z","lastTransitionTime":"2025-11-21T19:02:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.300909 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:30Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.313179 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-q5n7s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73831ccf-a071-4135-b8bf-ee1b9b3c2cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n54wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n54wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:29Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-q5n7s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:30Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.331368 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\
\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:30Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.350685 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45e6862c-1f97-44f6-bae7-1f3bcb8a6671\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"moun
tPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ff2d56a6e954338aa40e9ccacf6ea72f2dd1e66810cca1441497352ae855378\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:30Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.367895 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2qmlx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1a640e57-40c2-4e96-829d-c2ace468c63c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://296a1518b4535368c4d631d1a420e04ef8ce67c3595ad77c9f53089da1c82a49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gkqmt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f295d8d57e61a4a96a2973f461a7bafa92b43432b65abe88e0edddff5be8db6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gkqmt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2qmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:30Z is after 2025-08-24T17:21:41Z" Nov 21 
19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.387145 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:30Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.402042 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.402143 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.402269 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.402370 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.402406 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:30Z","lastTransitionTime":"2025-11-21T19:02:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.405830 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:30Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.435085 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd6417be-62d7-4b6a-9711-a89211dca42e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae34837e00965e7a23b95ae521b1ef20762594ea5d8fb890db85e076abfb474c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae34837e00965e7a23b95ae521b1ef20762594ea5d8fb890db85e076abfb474c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T19:02:27Z\\\",\\\"message\\\":\\\"milyPolicy:*SingleStack,ClusterIPs:[10.217.5.34],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nF1121 19:02:27.438930 6181 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:27Z is after 2025-08-24T17:21:41Z]\\\\nI1121 19:02:27.438940 6181 lb_config.go:1031] Cluster endpoints for openshift-ingress-canary/ingress-canary for network=default are: map[]\\\\nI1121 19:02:27.438818 6181 obj_retry.go:434] periodicallyRetryResources: Re\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:26Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-zzdxm_openshift-ovn-kubernetes(cd6417be-62d7-4b6a-9711-a89211dca42e)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zzdxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:30Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.448366 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e70a068b-c06b-4ffe-8496-6f55c321d614\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b6a8b6f55f76ffe5d5f9997137285e639ae17fda481325198a8561d79393480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",
\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e81ee034439f66ef1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-tbszf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:30Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.461038 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-n6w8v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"57baab98-95f2-4dff-94ff-a296ffe8a418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fec1e2980b38c9ac8c023bc67c0c4a17c7a7e47d88a78ffa6e72562410d0131e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clkbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-n6w8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:30Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.482060 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:30Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.498662 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:30Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.505461 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.505532 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.505547 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.505573 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.505593 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:30Z","lastTransitionTime":"2025-11-21T19:02:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.509441 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hb64h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c769c5d1-60d9-43e1-b130-4373c7eae670\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07ec62beec4f7c4a8cc1504df02a84665027e8c508d74022202e41f529ef9d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rz7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.12
6.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hb64h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:30Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.529592 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef2e819e1fcd8a87514d3018af92b78391cf38e5bbd149125674b3a09c9ec791\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\
\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\
\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xxkwp\": Internal 
error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:30Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.549099 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kf9jq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2eababf7-b5d3-4479-9ad5-f1060898f324\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccou
nt\\\",\\\"name\\\":\\\"kube-api-access-5hzjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kf9jq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:30Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.608258 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.608332 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.608352 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.608381 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.608401 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:30Z","lastTransitionTime":"2025-11-21T19:02:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.712140 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.712558 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.712577 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.712602 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.712620 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:30Z","lastTransitionTime":"2025-11-21T19:02:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.729985 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/73831ccf-a071-4135-b8bf-ee1b9b3c2cd1-metrics-certs\") pod \"network-metrics-daemon-q5n7s\" (UID: \"73831ccf-a071-4135-b8bf-ee1b9b3c2cd1\") " pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:02:30 crc kubenswrapper[4701]: E1121 19:02:30.730352 4701 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 21 19:02:30 crc kubenswrapper[4701]: E1121 19:02:30.730462 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/73831ccf-a071-4135-b8bf-ee1b9b3c2cd1-metrics-certs podName:73831ccf-a071-4135-b8bf-ee1b9b3c2cd1 nodeName:}" failed. No retries permitted until 2025-11-21 19:02:31.730431827 +0000 UTC m=+42.515571894 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/73831ccf-a071-4135-b8bf-ee1b9b3c2cd1-metrics-certs") pod "network-metrics-daemon-q5n7s" (UID: "73831ccf-a071-4135-b8bf-ee1b9b3c2cd1") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.815662 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.815719 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.815737 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.815760 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.815779 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:30Z","lastTransitionTime":"2025-11-21T19:02:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.919627 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.919695 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.919718 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.919748 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.919772 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:30Z","lastTransitionTime":"2025-11-21T19:02:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:30 crc kubenswrapper[4701]: I1121 19:02:30.951196 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:02:30 crc kubenswrapper[4701]: E1121 19:02:30.951470 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.023655 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.023703 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.023723 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.023748 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.023766 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:31Z","lastTransitionTime":"2025-11-21T19:02:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.126771 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.126842 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.126861 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.126888 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.126907 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:31Z","lastTransitionTime":"2025-11-21T19:02:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.229738 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.229787 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.229798 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.229814 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.229826 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:31Z","lastTransitionTime":"2025-11-21T19:02:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.333518 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.333971 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.334481 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.334684 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.335073 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:31Z","lastTransitionTime":"2025-11-21T19:02:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.438886 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.438942 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.438960 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.438984 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.439001 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:31Z","lastTransitionTime":"2025-11-21T19:02:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.541516 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.541573 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.541591 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.541614 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.541632 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:31Z","lastTransitionTime":"2025-11-21T19:02:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.644838 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.645003 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.645030 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.645057 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.645077 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:31Z","lastTransitionTime":"2025-11-21T19:02:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.740787 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/73831ccf-a071-4135-b8bf-ee1b9b3c2cd1-metrics-certs\") pod \"network-metrics-daemon-q5n7s\" (UID: \"73831ccf-a071-4135-b8bf-ee1b9b3c2cd1\") " pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:02:31 crc kubenswrapper[4701]: E1121 19:02:31.740989 4701 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 21 19:02:31 crc kubenswrapper[4701]: E1121 19:02:31.741142 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/73831ccf-a071-4135-b8bf-ee1b9b3c2cd1-metrics-certs podName:73831ccf-a071-4135-b8bf-ee1b9b3c2cd1 nodeName:}" failed. No retries permitted until 2025-11-21 19:02:33.741108774 +0000 UTC m=+44.526248841 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/73831ccf-a071-4135-b8bf-ee1b9b3c2cd1-metrics-certs") pod "network-metrics-daemon-q5n7s" (UID: "73831ccf-a071-4135-b8bf-ee1b9b3c2cd1") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.747787 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.747820 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.747829 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.747841 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.747849 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:31Z","lastTransitionTime":"2025-11-21T19:02:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.850948 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.851014 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.851031 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.851055 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.851072 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:31Z","lastTransitionTime":"2025-11-21T19:02:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.950615 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.950724 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:02:31 crc kubenswrapper[4701]: E1121 19:02:31.950841 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.950887 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:02:31 crc kubenswrapper[4701]: E1121 19:02:31.951050 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q5n7s" podUID="73831ccf-a071-4135-b8bf-ee1b9b3c2cd1" Nov 21 19:02:31 crc kubenswrapper[4701]: E1121 19:02:31.951419 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.953839 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.953907 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.953931 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.954027 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:31 crc kubenswrapper[4701]: I1121 19:02:31.954128 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:31Z","lastTransitionTime":"2025-11-21T19:02:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.057653 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.057720 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.057738 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.057763 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.057781 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:32Z","lastTransitionTime":"2025-11-21T19:02:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.159761 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.159815 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.159826 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.159878 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.159893 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:32Z","lastTransitionTime":"2025-11-21T19:02:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.262911 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.263339 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.263559 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.263738 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.263901 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:32Z","lastTransitionTime":"2025-11-21T19:02:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.367144 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.367186 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.367247 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.367276 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.367293 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:32Z","lastTransitionTime":"2025-11-21T19:02:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.470275 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.471151 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.471402 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.471603 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.471811 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:32Z","lastTransitionTime":"2025-11-21T19:02:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.575420 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.575765 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.575947 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.576123 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.576292 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:32Z","lastTransitionTime":"2025-11-21T19:02:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.679159 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.679270 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.679299 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.679327 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.679348 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:32Z","lastTransitionTime":"2025-11-21T19:02:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.782807 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.783174 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.783425 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.783631 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.783806 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:32Z","lastTransitionTime":"2025-11-21T19:02:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.886850 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.886939 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.886967 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.887002 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.887028 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:32Z","lastTransitionTime":"2025-11-21T19:02:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.950462 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:02:32 crc kubenswrapper[4701]: E1121 19:02:32.950914 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.990942 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.991003 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.991021 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.991048 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:32 crc kubenswrapper[4701]: I1121 19:02:32.991067 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:32Z","lastTransitionTime":"2025-11-21T19:02:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.112280 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.112358 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.112384 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.112416 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.112439 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:33Z","lastTransitionTime":"2025-11-21T19:02:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.217133 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.217243 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.217267 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.217298 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.217319 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:33Z","lastTransitionTime":"2025-11-21T19:02:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.321098 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.321160 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.321177 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.321230 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.321248 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:33Z","lastTransitionTime":"2025-11-21T19:02:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.424407 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.424479 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.424502 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.424530 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.424552 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:33Z","lastTransitionTime":"2025-11-21T19:02:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.527415 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.527459 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.527469 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.527485 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.527497 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:33Z","lastTransitionTime":"2025-11-21T19:02:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.630287 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.630340 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.630353 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.630370 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.630383 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:33Z","lastTransitionTime":"2025-11-21T19:02:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.733644 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.733936 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.734052 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.734230 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.734336 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:33Z","lastTransitionTime":"2025-11-21T19:02:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.766553 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/73831ccf-a071-4135-b8bf-ee1b9b3c2cd1-metrics-certs\") pod \"network-metrics-daemon-q5n7s\" (UID: \"73831ccf-a071-4135-b8bf-ee1b9b3c2cd1\") " pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:02:33 crc kubenswrapper[4701]: E1121 19:02:33.766776 4701 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 21 19:02:33 crc kubenswrapper[4701]: E1121 19:02:33.766862 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/73831ccf-a071-4135-b8bf-ee1b9b3c2cd1-metrics-certs podName:73831ccf-a071-4135-b8bf-ee1b9b3c2cd1 nodeName:}" failed. No retries permitted until 2025-11-21 19:02:37.766835785 +0000 UTC m=+48.551975852 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/73831ccf-a071-4135-b8bf-ee1b9b3c2cd1-metrics-certs") pod "network-metrics-daemon-q5n7s" (UID: "73831ccf-a071-4135-b8bf-ee1b9b3c2cd1") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.837726 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.837794 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.837811 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.837837 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.837856 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:33Z","lastTransitionTime":"2025-11-21T19:02:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.941072 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.941161 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.941190 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.941266 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.941293 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:33Z","lastTransitionTime":"2025-11-21T19:02:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.950884 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:02:33 crc kubenswrapper[4701]: E1121 19:02:33.951017 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q5n7s" podUID="73831ccf-a071-4135-b8bf-ee1b9b3c2cd1" Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.950898 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:33 crc kubenswrapper[4701]: I1121 19:02:33.951185 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:02:33 crc kubenswrapper[4701]: E1121 19:02:33.951561 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 19:02:33 crc kubenswrapper[4701]: E1121 19:02:33.951734 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.044365 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.044417 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.044434 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.044465 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.044483 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:34Z","lastTransitionTime":"2025-11-21T19:02:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.148304 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.148713 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.148945 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.149191 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.149409 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:34Z","lastTransitionTime":"2025-11-21T19:02:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.252859 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.253352 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.253549 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.253728 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.253900 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:34Z","lastTransitionTime":"2025-11-21T19:02:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.357317 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.357390 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.357411 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.357437 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.357453 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:34Z","lastTransitionTime":"2025-11-21T19:02:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.460455 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.460522 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.460539 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.460563 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.460582 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:34Z","lastTransitionTime":"2025-11-21T19:02:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.563991 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.564053 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.564069 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.564094 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.564112 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:34Z","lastTransitionTime":"2025-11-21T19:02:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.666959 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.667012 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.667023 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.667040 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.667054 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:34Z","lastTransitionTime":"2025-11-21T19:02:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.770188 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.770291 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.770310 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.770335 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.770354 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:34Z","lastTransitionTime":"2025-11-21T19:02:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.873620 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.873674 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.873691 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.873713 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.873732 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:34Z","lastTransitionTime":"2025-11-21T19:02:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.950119 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:02:34 crc kubenswrapper[4701]: E1121 19:02:34.950315 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.976426 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.976489 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.976512 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.976542 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:34 crc kubenswrapper[4701]: I1121 19:02:34.976564 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:34Z","lastTransitionTime":"2025-11-21T19:02:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.079602 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.079672 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.079698 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.079722 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.079740 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:35Z","lastTransitionTime":"2025-11-21T19:02:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.182998 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.183045 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.183061 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.183086 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.183104 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:35Z","lastTransitionTime":"2025-11-21T19:02:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.285659 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.285718 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.285735 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.285759 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.285782 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:35Z","lastTransitionTime":"2025-11-21T19:02:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.388862 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.388936 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.388954 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.388979 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.388997 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:35Z","lastTransitionTime":"2025-11-21T19:02:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.491469 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.491876 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.492239 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.492401 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.492596 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:35Z","lastTransitionTime":"2025-11-21T19:02:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.596449 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.596544 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.596561 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.596585 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.596602 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:35Z","lastTransitionTime":"2025-11-21T19:02:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.699508 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.699570 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.699587 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.699633 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.699651 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:35Z","lastTransitionTime":"2025-11-21T19:02:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.802503 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.802545 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.802562 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.802585 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.802602 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:35Z","lastTransitionTime":"2025-11-21T19:02:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.841755 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.841808 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.841825 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.841844 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.841860 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:35Z","lastTransitionTime":"2025-11-21T19:02:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:35 crc kubenswrapper[4701]: E1121 19:02:35.863425 4701 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3bda9678-f6a5-4de4-acaa-3527a0be80fa\\\",\\\"systemUUID\\\":\\\"5ab738c4-0d34-41bd-a531-77773953d838\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:35Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.869113 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.869156 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.869172 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.869193 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.869249 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:35Z","lastTransitionTime":"2025-11-21T19:02:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:35 crc kubenswrapper[4701]: E1121 19:02:35.889811 4701 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3bda9678-f6a5-4de4-acaa-3527a0be80fa\\\",\\\"systemUUID\\\":\\\"5ab738c4-0d34-41bd-a531-77773953d838\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:35Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.895992 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.896310 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.896522 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.896743 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.896930 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:35Z","lastTransitionTime":"2025-11-21T19:02:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:35 crc kubenswrapper[4701]: E1121 19:02:35.918443 4701 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3bda9678-f6a5-4de4-acaa-3527a0be80fa\\\",\\\"systemUUID\\\":\\\"5ab738c4-0d34-41bd-a531-77773953d838\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:35Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.923471 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.923543 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.923568 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.923598 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.923621 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:35Z","lastTransitionTime":"2025-11-21T19:02:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:35 crc kubenswrapper[4701]: E1121 19:02:35.943265 4701 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3bda9678-f6a5-4de4-acaa-3527a0be80fa\\\",\\\"systemUUID\\\":\\\"5ab738c4-0d34-41bd-a531-77773953d838\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:35Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.948316 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.948366 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.948382 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.948403 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.948419 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:35Z","lastTransitionTime":"2025-11-21T19:02:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.950970 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:02:35 crc kubenswrapper[4701]: E1121 19:02:35.951154 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.951294 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:35 crc kubenswrapper[4701]: E1121 19:02:35.951547 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.951736 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:02:35 crc kubenswrapper[4701]: E1121 19:02:35.951911 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-q5n7s" podUID="73831ccf-a071-4135-b8bf-ee1b9b3c2cd1" Nov 21 19:02:35 crc kubenswrapper[4701]: E1121 19:02:35.969464 4701 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3bda9678-f6a5-4de4-acaa-3527a0be80fa\\\",\\\"systemUUID\\\":\\\"5ab738c4-0d34-41bd-a531-77773953d838\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:35Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:35 crc kubenswrapper[4701]: E1121 19:02:35.969689 4701 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.972691 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.972916 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.973083 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.973314 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:35 crc kubenswrapper[4701]: I1121 19:02:35.973498 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:35Z","lastTransitionTime":"2025-11-21T19:02:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.078326 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.078761 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.079044 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.079304 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.079529 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:36Z","lastTransitionTime":"2025-11-21T19:02:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.183183 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.183268 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.183286 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.183310 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.183327 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:36Z","lastTransitionTime":"2025-11-21T19:02:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.286854 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.286930 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.286968 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.286999 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.287022 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:36Z","lastTransitionTime":"2025-11-21T19:02:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.390190 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.390274 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.390292 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.390314 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.390331 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:36Z","lastTransitionTime":"2025-11-21T19:02:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.493481 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.493548 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.493566 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.493592 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.493611 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:36Z","lastTransitionTime":"2025-11-21T19:02:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.596003 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.596115 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.596135 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.596193 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.596271 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:36Z","lastTransitionTime":"2025-11-21T19:02:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.698867 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.698928 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.698945 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.698969 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.698986 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:36Z","lastTransitionTime":"2025-11-21T19:02:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.802589 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.802661 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.802680 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.802706 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.802724 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:36Z","lastTransitionTime":"2025-11-21T19:02:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.905534 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.905765 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.905826 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.905878 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.905897 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:36Z","lastTransitionTime":"2025-11-21T19:02:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:36 crc kubenswrapper[4701]: I1121 19:02:36.950702 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:02:36 crc kubenswrapper[4701]: E1121 19:02:36.950914 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.008977 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.009041 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.009065 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.009094 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.009115 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:37Z","lastTransitionTime":"2025-11-21T19:02:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.112027 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.112078 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.112094 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.112129 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.112146 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:37Z","lastTransitionTime":"2025-11-21T19:02:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.219980 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.220059 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.220078 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.220104 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.220122 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:37Z","lastTransitionTime":"2025-11-21T19:02:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.323521 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.323580 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.323597 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.323622 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.323647 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:37Z","lastTransitionTime":"2025-11-21T19:02:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.426551 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.426897 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.427049 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.427237 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.427387 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:37Z","lastTransitionTime":"2025-11-21T19:02:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.530537 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.530926 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.531130 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.531352 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.531490 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:37Z","lastTransitionTime":"2025-11-21T19:02:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.634918 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.634984 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.635002 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.635029 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.635047 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:37Z","lastTransitionTime":"2025-11-21T19:02:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.738757 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.738835 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.738853 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.738881 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.738898 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:37Z","lastTransitionTime":"2025-11-21T19:02:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.814517 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/73831ccf-a071-4135-b8bf-ee1b9b3c2cd1-metrics-certs\") pod \"network-metrics-daemon-q5n7s\" (UID: \"73831ccf-a071-4135-b8bf-ee1b9b3c2cd1\") " pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:02:37 crc kubenswrapper[4701]: E1121 19:02:37.814720 4701 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 21 19:02:37 crc kubenswrapper[4701]: E1121 19:02:37.814803 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/73831ccf-a071-4135-b8bf-ee1b9b3c2cd1-metrics-certs podName:73831ccf-a071-4135-b8bf-ee1b9b3c2cd1 nodeName:}" failed. No retries permitted until 2025-11-21 19:02:45.814779999 +0000 UTC m=+56.599920056 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/73831ccf-a071-4135-b8bf-ee1b9b3c2cd1-metrics-certs") pod "network-metrics-daemon-q5n7s" (UID: "73831ccf-a071-4135-b8bf-ee1b9b3c2cd1") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.841964 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.842034 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.842051 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.842077 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.842098 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:37Z","lastTransitionTime":"2025-11-21T19:02:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.944246 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.944322 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.944340 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.944362 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.944381 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:37Z","lastTransitionTime":"2025-11-21T19:02:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.950864 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.950934 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:37 crc kubenswrapper[4701]: I1121 19:02:37.950879 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:02:37 crc kubenswrapper[4701]: E1121 19:02:37.951037 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 19:02:37 crc kubenswrapper[4701]: E1121 19:02:37.951234 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 19:02:37 crc kubenswrapper[4701]: E1121 19:02:37.951354 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q5n7s" podUID="73831ccf-a071-4135-b8bf-ee1b9b3c2cd1" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.047182 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.047301 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.047322 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.047354 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.047377 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:38Z","lastTransitionTime":"2025-11-21T19:02:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.150319 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.150477 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.150503 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.150532 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.150555 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:38Z","lastTransitionTime":"2025-11-21T19:02:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.254435 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.254530 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.254547 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.254574 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.254595 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:38Z","lastTransitionTime":"2025-11-21T19:02:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.357890 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.358012 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.358037 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.358070 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.358092 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:38Z","lastTransitionTime":"2025-11-21T19:02:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.376101 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.386937 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.415660 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef2e819e1fcd8a87514d3018af92b78391cf38e5bbd149125674b3a09c9ec791\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"nam
e\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\
\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xxkwp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:38Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.452163 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kf9jq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2eababf7-b5d3-4479-9ad5-f1060898f324\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access
-5hzjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kf9jq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:38Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.461039 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.461118 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.461144 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.461169 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.461188 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:38Z","lastTransitionTime":"2025-11-21T19:02:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.470661 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:38Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.488711 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:38Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.504041 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hb64h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c769c5d1-60d9-43e1-b130-4373c7eae670\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07ec62beec4f7c4a8cc1504df02a84665027e8c508d74022202e41f529ef9d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rz7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hb64h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:38Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.519392 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:38Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.538859 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:38Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.553822 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-q5n7s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73831ccf-a071-4135-b8bf-ee1b9b3c2cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"message\\\":\\\"containers 
with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n54wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n54wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:29Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-q5n7s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:38Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.564038 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.564160 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.564247 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.564336 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.564401 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:38Z","lastTransitionTime":"2025-11-21T19:02:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.570561 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2qmlx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1a640e57-40c2-4e96-829d-c2ace468c63c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://296a1518b4535368c4d631d1a420e04ef8ce67c3595ad77c9f53089da1c82a49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gkqmt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f295d8d57e61a4a96a2973f461a7bafa92b43432b65abe88e0edddff5be8db6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gkqmt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2qmlx\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:38Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.588264 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:38Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.603119 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"45e6862c-1f97-44f6-bae7-1f3bcb8a6671\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ff2d56a6e954338aa40e9ccacf6ea72f2dd1e66810cca1441497352ae855378\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:38Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.622832 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:38Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.649468 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd6417be-62d7-4b6a-9711-a89211dca42e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae34837e00965e7a23b95ae521b1ef20762594ea
5d8fb890db85e076abfb474c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae34837e00965e7a23b95ae521b1ef20762594ea5d8fb890db85e076abfb474c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T19:02:27Z\\\",\\\"message\\\":\\\"milyPolicy:*SingleStack,ClusterIPs:[10.217.5.34],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nF1121 19:02:27.438930 6181 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:27Z is after 2025-08-24T17:21:41Z]\\\\nI1121 19:02:27.438940 6181 lb_config.go:1031] Cluster endpoints for openshift-ingress-canary/ingress-canary for network=default are: map[]\\\\nI1121 19:02:27.438818 6181 obj_retry.go:434] periodicallyRetryResources: Re\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:26Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-zzdxm_openshift-ovn-kubernetes(cd6417be-62d7-4b6a-9711-a89211dca42e)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zzdxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:38Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.664674 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e70a068b-c06b-4ffe-8496-6f55c321d614\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b6a8b6f55f76ffe5d5f9997137285e639ae17fda481325198a8561d79393480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",
\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e81ee034439f66ef1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-tbszf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:38Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.667574 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.667657 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.667688 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.667720 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.667743 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:38Z","lastTransitionTime":"2025-11-21T19:02:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.679742 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-n6w8v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"57baab98-95f2-4dff-94ff-a296ffe8a418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fec1e2980b38c9ac8c023bc67c0c4a17c7a7e47d88a78ffa6e72562410d0131e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clkbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-n6w8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:38Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.699025 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:38Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.770760 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.770832 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.770850 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.770878 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.770918 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:38Z","lastTransitionTime":"2025-11-21T19:02:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.878896 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.878963 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.878982 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.879008 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.879027 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:38Z","lastTransitionTime":"2025-11-21T19:02:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.950158 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:02:38 crc kubenswrapper[4701]: E1121 19:02:38.950399 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.982349 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.982407 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.982426 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.982451 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:38 crc kubenswrapper[4701]: I1121 19:02:38.982469 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:38Z","lastTransitionTime":"2025-11-21T19:02:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.086090 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.086159 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.086185 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.086250 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.086278 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:39Z","lastTransitionTime":"2025-11-21T19:02:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.189464 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.189532 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.189554 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.189579 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.189603 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:39Z","lastTransitionTime":"2025-11-21T19:02:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.291890 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.291947 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.292006 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.292029 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.292075 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:39Z","lastTransitionTime":"2025-11-21T19:02:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.394951 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.395029 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.395054 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.395083 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.395106 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:39Z","lastTransitionTime":"2025-11-21T19:02:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.498322 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.498397 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.498420 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.498496 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.498602 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:39Z","lastTransitionTime":"2025-11-21T19:02:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.601779 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.601846 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.601863 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.601892 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.601910 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:39Z","lastTransitionTime":"2025-11-21T19:02:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.705334 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.706398 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.706440 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.706464 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.706483 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:39Z","lastTransitionTime":"2025-11-21T19:02:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.808886 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.809244 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.809440 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.809589 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.809719 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:39Z","lastTransitionTime":"2025-11-21T19:02:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.913273 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.913339 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.913357 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.913383 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.913400 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:39Z","lastTransitionTime":"2025-11-21T19:02:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.949982 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.950050 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:02:39 crc kubenswrapper[4701]: E1121 19:02:39.952530 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q5n7s" podUID="73831ccf-a071-4135-b8bf-ee1b9b3c2cd1" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.950240 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:02:39 crc kubenswrapper[4701]: E1121 19:02:39.950136 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 19:02:39 crc kubenswrapper[4701]: E1121 19:02:39.952679 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.971591 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f83b02e5-a459-4898-8c7b-3366362593bf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa3eaa10a4f5d97d32beb3ae165c5482920354befe8671ae1012310f0cc51216\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0492e473455f1ff5bbac682a6cb03808c7acabceac976f63c5921134406f901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b3017c15a14fa52dd2d07aedbee017fee93c6d95d96992386779de9bdbbd7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"
mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://09e96a898422e0d1c9e6adcd100761bf262b000ec85367141f258f5a76fd606e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://09e96a898422e0d1c9e6adcd100761bf262b000ec85367141f258f5a76fd606e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:39Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:39 crc kubenswrapper[4701]: I1121 19:02:39.992893 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:39Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.012865 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:40Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.016960 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.017397 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.017552 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.017763 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.017970 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:40Z","lastTransitionTime":"2025-11-21T19:02:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.047802 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd6417be-62d7-4b6a-9711-a89211dca42e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae34837e00965e7a23b95ae521b1ef20762594ea5d8fb890db85e076abfb474c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae34837e00965e7a23b95ae521b1ef20762594ea5d8fb890db85e076abfb474c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T19:02:27Z\\\",\\\"message\\\":\\\"milyPolicy:*SingleStack,ClusterIPs:[10.217.5.34],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nF1121 19:02:27.438930 6181 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:27Z is after 2025-08-24T17:21:41Z]\\\\nI1121 19:02:27.438940 6181 lb_config.go:1031] Cluster endpoints for openshift-ingress-canary/ingress-canary for network=default are: map[]\\\\nI1121 19:02:27.438818 6181 obj_retry.go:434] periodicallyRetryResources: Re\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:26Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-zzdxm_openshift-ovn-kubernetes(cd6417be-62d7-4b6a-9711-a89211dca42e)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zzdxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:40Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.066401 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e70a068b-c06b-4ffe-8496-6f55c321d614\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b6a8b6f55f76ffe5d5f9997137285e639ae17fda481325198a8561d79393480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",
\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e81ee034439f66ef1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-tbszf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:40Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.083863 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-n6w8v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"57baab98-95f2-4dff-94ff-a296ffe8a418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fec1e2980b38c9ac8c023bc67c0c4a17c7a7e47d88a78ffa6e72562410d0131e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clkbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-n6w8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:40Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.105852 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:40Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.122516 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.122570 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.122590 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.122616 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.122634 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:40Z","lastTransitionTime":"2025-11-21T19:02:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.124884 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:40Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.143722 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hb64h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c769c5d1-60d9-43e1-b130-4373c7eae670\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07ec62beec4f7c4a8cc1504df02a84665027e8c508d74022202e41f529ef9d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rz7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hb64h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:40Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.169378 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef2e819e1fcd8a87514d3018af92b78391cf38e5bbd149125674b3a09c9ec791\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xxkwp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:40Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.187456 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kf9jq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2eababf7-b5d3-4479-9ad5-f1060898f324\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5hzjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kf9jq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:40Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.206492 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:40Z is after 
2025-08-24T17:21:41Z" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.225973 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.226048 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.226071 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.226105 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.226127 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:40Z","lastTransitionTime":"2025-11-21T19:02:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.226526 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:40Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.244036 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-q5n7s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73831ccf-a071-4135-b8bf-ee1b9b3c2cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n54wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n54wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:29Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-q5n7s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:40Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.268072 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\
\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:40Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.289676 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45e6862c-1f97-44f6-bae7-1f3bcb8a6671\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"moun
tPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ff2d56a6e954338aa40e9ccacf6ea72f2dd1e66810cca1441497352ae855378\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:40Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.306744 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2qmlx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1a640e57-40c2-4e96-829d-c2ace468c63c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://296a1518b4535368c4d631d1a420e04ef8ce67c3595ad77c9f53089da1c82a49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gkqmt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f295d8d57e61a4a96a2973f461a7bafa92b43432b65abe88e0edddff5be8db6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gkqmt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2qmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:40Z is after 2025-08-24T17:21:41Z" Nov 21 
19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.329744 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.329820 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.329842 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.329873 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.329895 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:40Z","lastTransitionTime":"2025-11-21T19:02:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.432763 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.432829 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.432847 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.432871 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.432888 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:40Z","lastTransitionTime":"2025-11-21T19:02:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.536766 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.536857 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.536884 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.536918 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.536940 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:40Z","lastTransitionTime":"2025-11-21T19:02:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.639858 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.639934 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.639954 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.639981 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.640000 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:40Z","lastTransitionTime":"2025-11-21T19:02:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.743555 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.743613 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.743632 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.743658 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.743676 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:40Z","lastTransitionTime":"2025-11-21T19:02:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.846568 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.846654 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.846683 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.846716 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.846738 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:40Z","lastTransitionTime":"2025-11-21T19:02:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.949610 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.949684 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.949701 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.949726 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.949745 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:40Z","lastTransitionTime":"2025-11-21T19:02:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:40 crc kubenswrapper[4701]: I1121 19:02:40.949920 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:02:40 crc kubenswrapper[4701]: E1121 19:02:40.950055 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.053619 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.053674 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.053690 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.053716 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.053734 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:41Z","lastTransitionTime":"2025-11-21T19:02:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.156344 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.156411 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.156431 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.156458 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.156476 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:41Z","lastTransitionTime":"2025-11-21T19:02:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.260082 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.260152 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.260172 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.260194 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.260244 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:41Z","lastTransitionTime":"2025-11-21T19:02:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.363827 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.363899 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.363917 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.363943 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.363960 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:41Z","lastTransitionTime":"2025-11-21T19:02:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.466246 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.466349 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.466368 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.466392 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.466409 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:41Z","lastTransitionTime":"2025-11-21T19:02:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.569299 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.569369 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.569386 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.569415 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.569439 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:41Z","lastTransitionTime":"2025-11-21T19:02:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.659038 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.659167 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.659273 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:02:41 crc kubenswrapper[4701]: E1121 19:02:41.659316 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:13.659281253 +0000 UTC m=+84.444421320 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:02:41 crc kubenswrapper[4701]: E1121 19:02:41.659356 4701 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.659369 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:41 crc kubenswrapper[4701]: E1121 19:02:41.659471 4701 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 19:02:41 crc kubenswrapper[4701]: E1121 19:02:41.659566 4701 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 19:02:41 crc kubenswrapper[4701]: E1121 19:02:41.659588 4701 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object 
"openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 19:02:41 crc kubenswrapper[4701]: E1121 19:02:41.659485 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 19:03:13.659454217 +0000 UTC m=+84.444594274 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 19:02:41 crc kubenswrapper[4701]: E1121 19:02:41.659467 4701 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 19:02:41 crc kubenswrapper[4701]: E1121 19:02:41.659754 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 19:03:13.659727283 +0000 UTC m=+84.444867350 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.659790 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:02:41 crc kubenswrapper[4701]: E1121 19:02:41.659839 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-21 19:03:13.659825955 +0000 UTC m=+84.444966012 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 19:02:41 crc kubenswrapper[4701]: E1121 19:02:41.659911 4701 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 19:02:41 crc kubenswrapper[4701]: E1121 19:02:41.659945 4701 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 19:02:41 crc kubenswrapper[4701]: E1121 19:02:41.659965 4701 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 19:02:41 crc kubenswrapper[4701]: E1121 19:02:41.660032 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-21 19:03:13.66001384 +0000 UTC m=+84.445153897 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.672242 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.672311 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.672330 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.672353 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.672370 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:41Z","lastTransitionTime":"2025-11-21T19:02:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.775971 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.776099 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.776119 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.776142 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.776161 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:41Z","lastTransitionTime":"2025-11-21T19:02:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.879612 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.879879 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.879904 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.879936 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.879961 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:41Z","lastTransitionTime":"2025-11-21T19:02:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.951542 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.952080 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:41 crc kubenswrapper[4701]: E1121 19:02:41.952302 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.952356 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:02:41 crc kubenswrapper[4701]: E1121 19:02:41.952490 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 19:02:41 crc kubenswrapper[4701]: E1121 19:02:41.952585 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q5n7s" podUID="73831ccf-a071-4135-b8bf-ee1b9b3c2cd1" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.983168 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.983285 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.983310 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.983339 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:41 crc kubenswrapper[4701]: I1121 19:02:41.983360 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:41Z","lastTransitionTime":"2025-11-21T19:02:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.086570 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.086640 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.086658 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.086684 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.086700 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:42Z","lastTransitionTime":"2025-11-21T19:02:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.189988 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.190039 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.190050 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.190067 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.190078 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:42Z","lastTransitionTime":"2025-11-21T19:02:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.293660 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.293733 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.293752 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.293777 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.293797 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:42Z","lastTransitionTime":"2025-11-21T19:02:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.397293 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.397348 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.397361 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.397392 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.397407 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:42Z","lastTransitionTime":"2025-11-21T19:02:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.500241 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.500332 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.500363 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.500397 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.500421 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:42Z","lastTransitionTime":"2025-11-21T19:02:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.603811 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.603863 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.603879 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.603903 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.603919 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:42Z","lastTransitionTime":"2025-11-21T19:02:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.706775 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.706918 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.706939 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.706963 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.706981 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:42Z","lastTransitionTime":"2025-11-21T19:02:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.810676 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.810750 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.810767 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.810792 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.810809 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:42Z","lastTransitionTime":"2025-11-21T19:02:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.913839 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.913932 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.913950 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.913975 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.913992 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:42Z","lastTransitionTime":"2025-11-21T19:02:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.950675 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:02:42 crc kubenswrapper[4701]: E1121 19:02:42.951278 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 19:02:42 crc kubenswrapper[4701]: I1121 19:02:42.951657 4701 scope.go:117] "RemoveContainer" containerID="ae34837e00965e7a23b95ae521b1ef20762594ea5d8fb890db85e076abfb474c" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.017434 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.017503 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.017521 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.017543 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.017561 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:43Z","lastTransitionTime":"2025-11-21T19:02:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.120669 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.120751 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.120857 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.120910 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.120935 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:43Z","lastTransitionTime":"2025-11-21T19:02:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.223857 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.223912 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.223932 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.223980 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.224003 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:43Z","lastTransitionTime":"2025-11-21T19:02:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.326937 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.327012 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.327032 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.327059 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.327077 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:43Z","lastTransitionTime":"2025-11-21T19:02:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.431695 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.431753 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.431772 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.431802 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.431824 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:43Z","lastTransitionTime":"2025-11-21T19:02:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.439390 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zzdxm_cd6417be-62d7-4b6a-9711-a89211dca42e/ovnkube-controller/1.log" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.445029 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" event={"ID":"cd6417be-62d7-4b6a-9711-a89211dca42e","Type":"ContainerStarted","Data":"ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63"} Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.445836 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.478965 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd6417be-62d7-4b6a-9711-a89211dca42e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad576a4992adab10688e96fc25b505ad62fcb76a
61f49f2c980efbd229127d63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae34837e00965e7a23b95ae521b1ef20762594ea5d8fb890db85e076abfb474c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T19:02:27Z\\\",\\\"message\\\":\\\"milyPolicy:*SingleStack,ClusterIPs:[10.217.5.34],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nF1121 19:02:27.438930 6181 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:27Z is after 2025-08-24T17:21:41Z]\\\\nI1121 19:02:27.438940 6181 lb_config.go:1031] Cluster endpoints for openshift-ingress-canary/ingress-canary for network=default are: map[]\\\\nI1121 19:02:27.438818 6181 obj_retry.go:434] periodicallyRetryResources: 
Re\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:26Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zzdxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:43Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.501346 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e70a068b-c06b-4ffe-8496-6f55c321d614\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b6a8b6f55f76ffe5d5f9997137285e639ae17fda481325198a8561d79393480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e81ee034439f66ef1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-tbszf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:43Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.517352 4701 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-n6w8v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"57baab98-95f2-4dff-94ff-a296ffe8a418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fec1e2980b38c9ac8c023bc67c0c4a17c7a7e47d88a78ffa6e72562410d0131e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clkbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-n6w8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:43Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.534431 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.534478 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.534490 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.534508 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.534521 4701 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:43Z","lastTransitionTime":"2025-11-21T19:02:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.541366 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f83b02e5-a459-4898-8c7b-3366362593bf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa3eaa10a4f5d97d32beb3ae165c5482920354befe8671ae1012310f0cc51216\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0492e473455f1ff5bbac682a6cb03808c7acabceac976f63c5921134406f901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b3017c15a14fa52dd2d07aedbee017fee93c6d95d96992386779de9bdbbd7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controlle
r\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://09e96a898422e0d1c9e6adcd100761bf262b000ec85367141f258f5a76fd606e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://09e96a898422e0d1c9e6adcd100761bf262b000ec85367141f258f5a76fd606e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:43Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.562816 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:43Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.581715 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:43Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.605710 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kf9jq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2eababf7-b5d3-4479-9ad5-f1060898f324\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\
\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5hzjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kf9jq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:43Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.622465 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:43Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.638594 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.638696 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.638724 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.638763 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.638791 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:43Z","lastTransitionTime":"2025-11-21T19:02:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.640294 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:43Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.655060 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hb64h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c769c5d1-60d9-43e1-b130-4373c7eae670\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07ec62beec4f7c4a8cc1504df02a84665027e8c508d74022202e41f529ef9d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rz7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hb64h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:43Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.681884 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef2e819e1fcd8a87514d3018af92b78391cf38e5bbd149125674b3a09c9ec791\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xxkwp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:43Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.702733 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:43Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.721211 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:43Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.737531 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-q5n7s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73831ccf-a071-4135-b8bf-ee1b9b3c2cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"message\\\":\\\"containers 
with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n54wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n54wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:29Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-q5n7s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:43Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.741368 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.741412 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.741421 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.741445 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.741455 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:43Z","lastTransitionTime":"2025-11-21T19:02:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.754482 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:43Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.771323 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"45e6862c-1f97-44f6-bae7-1f3bcb8a6671\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ff2d56a6e954338aa40e9ccacf6ea72f2dd1e66810cca1441497352ae855378\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:43Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.787322 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2qmlx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1a640e57-40c2-4e96-829d-c2ace468c63c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://296a1518b4535368c4d631d1a420e04ef8ce67c3595ad77c9f53089da1c82a49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gkqmt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f295d8d57e61
a4a96a2973f461a7bafa92b43432b65abe88e0edddff5be8db6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gkqmt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2qmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:43Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.844450 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.844537 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.844562 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.844601 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.844628 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:43Z","lastTransitionTime":"2025-11-21T19:02:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.947015 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.947079 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.947093 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.947113 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.947126 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:43Z","lastTransitionTime":"2025-11-21T19:02:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.950590 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.950642 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:02:43 crc kubenswrapper[4701]: I1121 19:02:43.950678 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:02:43 crc kubenswrapper[4701]: E1121 19:02:43.950767 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 19:02:43 crc kubenswrapper[4701]: E1121 19:02:43.950854 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q5n7s" podUID="73831ccf-a071-4135-b8bf-ee1b9b3c2cd1" Nov 21 19:02:43 crc kubenswrapper[4701]: E1121 19:02:43.950906 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.049590 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.049654 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.049664 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.049682 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.049696 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:44Z","lastTransitionTime":"2025-11-21T19:02:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.153014 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.153077 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.153096 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.153120 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.153137 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:44Z","lastTransitionTime":"2025-11-21T19:02:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.259818 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.259913 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.259950 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.259984 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.260011 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:44Z","lastTransitionTime":"2025-11-21T19:02:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.363746 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.363812 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.363832 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.363861 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.363881 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:44Z","lastTransitionTime":"2025-11-21T19:02:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.451786 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zzdxm_cd6417be-62d7-4b6a-9711-a89211dca42e/ovnkube-controller/2.log" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.452929 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zzdxm_cd6417be-62d7-4b6a-9711-a89211dca42e/ovnkube-controller/1.log" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.457713 4701 generic.go:334] "Generic (PLEG): container finished" podID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerID="ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63" exitCode=1 Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.457933 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" event={"ID":"cd6417be-62d7-4b6a-9711-a89211dca42e","Type":"ContainerDied","Data":"ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63"} Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.458013 4701 scope.go:117] "RemoveContainer" containerID="ae34837e00965e7a23b95ae521b1ef20762594ea5d8fb890db85e076abfb474c" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.458844 4701 scope.go:117] "RemoveContainer" containerID="ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63" Nov 21 19:02:44 crc kubenswrapper[4701]: E1121 19:02:44.459120 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-zzdxm_openshift-ovn-kubernetes(cd6417be-62d7-4b6a-9711-a89211dca42e)\"" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.466224 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.466352 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.466373 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.466399 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.466417 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:44Z","lastTransitionTime":"2025-11-21T19:02:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.482571 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45e6862c-1f97-44f6-bae7-1f3bcb8a6671\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ff2d56a6e954338aa40e9ccacf6ea72f2dd1e66810cca1441497352ae855378\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:44Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.502112 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2qmlx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1a640e57-40c2-4e96-829d-c2ace468c63c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://296a1518b4535368c4d631d1a420e04ef8ce67c3595ad77c9f53089da1c82a49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gkqmt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f295d8d57e61a4a96a2973f461a7bafa92b43432b65abe88e0edddff5be8db6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gkqmt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2qmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:44Z is after 2025-08-24T17:21:41Z" Nov 21 
19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.524409 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:44Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.547128 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:44Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.568609 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:44Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.569257 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.569307 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.569324 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.569348 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.569368 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:44Z","lastTransitionTime":"2025-11-21T19:02:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.600968 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd6417be-62d7-4b6a-9711-a89211dca42e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae34837e00965e7a23b95ae521b1ef20762594ea5d8fb890db85e076abfb474c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T19:02:27Z\\\",\\\"message\\\":\\\"milyPolicy:*SingleStack,ClusterIPs:[10.217.5.34],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nF1121 19:02:27.438930 6181 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:27Z is after 2025-08-24T17:21:41Z]\\\\nI1121 19:02:27.438940 6181 lb_config.go:1031] Cluster endpoints for openshift-ingress-canary/ingress-canary for network=default are: map[]\\\\nI1121 19:02:27.438818 6181 obj_retry.go:434] periodicallyRetryResources: 
Re\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:26Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T19:02:44Z\\\",\\\"message\\\":\\\"oller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1121 19:02:44.003721 6385 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1121 19:02:44.003749 6385 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1121 19:02:44.003769 6385 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1121 19:02:44.003793 6385 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1121 19:02:44.003797 6385 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1121 19:02:44.003812 6385 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1121 19:02:44.003824 6385 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1121 19:02:44.003829 6385 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1121 19:02:44.003857 6385 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1121 19:02:44.003869 6385 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1121 19:02:44.003878 6385 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1121 19:02:44.003925 6385 factory.go:656] Stopping watch factory\\\\nI1121 19:02:44.003947 6385 ovnkube.go:599] Stopped ovnkube\\\\nI1121 19:02:44.003948 6385 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1121 19:02:44.004011 6385 handler.go:208] Removed *v1.Node event handler 
2\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zzdxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:44Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.619851 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e70a068b-c06b-4ffe-8496-6f55c321d614\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b6a8b6f55f76ffe5d5f9997137285e639ae17fda481325198a8561d79393480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadO
nly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e81ee034439f66ef1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-tbszf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:44Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.636435 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-n6w8v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"57baab98-95f2-4dff-94ff-a296ffe8a418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fec1e2980b38c9ac8c023bc67c0c4a17c7a7e47d88a78ffa6e72562410d0131e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"k
ube-api-access-clkbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-n6w8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:44Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.654844 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f83b02e5-a459-4898-8c7b-3366362593bf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa3eaa10a4f5d97d32beb3ae165c5482920354befe8671ae1012310f0cc51216\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0492e473455f1ff5bbac682a6cb03808c7acabceac976f63c5921134406f901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b3017c15a14fa52dd2d07aedbee017fee93c6d95d96992386779de9bdbbd7c\\\",\\\"image\\
\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://09e96a898422e0d1c9e6adcd100761bf262b000ec85367141f258f5a76fd606e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://09e96a898422e0d1c9e6adcd100761bf262b000ec85367141f258f5a76fd606e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:44Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.670123 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hb64h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c769c5d1-60d9-43e1-b130-4373c7eae670\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07ec62beec4f7c4a8cc1504df02a84665027e8c508d74022202e41f529ef9d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rz7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hb64h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:44Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.672370 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.672443 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.672467 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.672498 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.672523 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:44Z","lastTransitionTime":"2025-11-21T19:02:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.692692 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef2e819e1fcd8a87514d3018af92b78391cf38e5bbd149125674b3a09c9ec791\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf687ba9
996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/e
ntrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xxkwp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:44Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.714287 4701 
status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kf9jq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2eababf7-b5d3-4479-9ad5-f1060898f324\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5hzjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\
\"}}\" for pod \"openshift-multus\"/\"multus-kf9jq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:44Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.732853 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:44Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.752496 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:44Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.770013 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-q5n7s" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"73831ccf-a071-4135-b8bf-ee1b9b3c2cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n54wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n54wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:29Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-q5n7s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:44Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.775989 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.776031 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.776042 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.776062 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.776074 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:44Z","lastTransitionTime":"2025-11-21T19:02:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.788801 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:44Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.807691 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:44Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.880943 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.881014 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.881039 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.881072 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.881093 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:44Z","lastTransitionTime":"2025-11-21T19:02:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.950919 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:02:44 crc kubenswrapper[4701]: E1121 19:02:44.951241 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.984630 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.984704 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.984723 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.984753 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:44 crc kubenswrapper[4701]: I1121 19:02:44.984773 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:44Z","lastTransitionTime":"2025-11-21T19:02:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.087785 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.087845 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.087863 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.087888 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.087906 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:45Z","lastTransitionTime":"2025-11-21T19:02:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.191744 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.191813 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.191831 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.191861 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.191878 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:45Z","lastTransitionTime":"2025-11-21T19:02:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.295065 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.295122 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.295140 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.295166 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.295183 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:45Z","lastTransitionTime":"2025-11-21T19:02:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.398437 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.398513 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.398531 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.398558 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.398574 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:45Z","lastTransitionTime":"2025-11-21T19:02:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.464702 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zzdxm_cd6417be-62d7-4b6a-9711-a89211dca42e/ovnkube-controller/2.log" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.470710 4701 scope.go:117] "RemoveContainer" containerID="ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63" Nov 21 19:02:45 crc kubenswrapper[4701]: E1121 19:02:45.470971 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-zzdxm_openshift-ovn-kubernetes(cd6417be-62d7-4b6a-9711-a89211dca42e)\"" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.494372 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:45Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.509446 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.509520 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.509540 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.509567 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.509592 4701 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:45Z","lastTransitionTime":"2025-11-21T19:02:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.517805 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45e6862c-1f97-44f6-bae7-1f3bcb8a6671\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ff2d56a6e954338aa40e9ccacf6ea72f2dd1e66810cca1441497352ae855378\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:45Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.536963 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2qmlx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1a640e57-40c2-4e96-829d-c2ace468c63c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://296a1518b4535368c4d631d1a420e04ef8ce67c3595ad77c9f53089da1c82a49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gkqmt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f295d8d57e61a4a96a2973f461a7bafa92b43432b65abe88e0edddff5be8db6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gkqmt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2qmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:45Z is after 2025-08-24T17:21:41Z" Nov 21 
19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.555968 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f83b02e5-a459-4898-8c7b-3366362593bf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa3eaa10a4f5d97d32beb3ae165c5482920354befe8671ae1012310f0cc51216\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0492e473455f1ff5bbac682a6cb03808c7acabceac976f63c5921134406f901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b3017c15a14fa52dd2d07aedbee017fee93c6d95d96992386779de9bdbbd7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.
126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://09e96a898422e0d1c9e6adcd100761bf262b000ec85367141f258f5a76fd606e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://09e96a898422e0d1c9e6adcd100761bf262b000ec85367141f258f5a76fd606e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:45Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.576037 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:45Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.593998 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:45Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.612842 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.612931 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.612949 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.613008 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.613026 4701 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:45Z","lastTransitionTime":"2025-11-21T19:02:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.623128 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd6417be-62d7-4b6a-9711-a89211dca42e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\
":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access
-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T19:02:44Z\\\",\\\"message\\\":\\\"oller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1121 19:02:44.003721 6385 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1121 19:02:44.003749 6385 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1121 19:02:44.003769 6385 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1121 19:02:44.003793 6385 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1121 19:02:44.003797 6385 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1121 19:02:44.003812 6385 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1121 19:02:44.003824 6385 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1121 19:02:44.003829 6385 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1121 19:02:44.003857 6385 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1121 19:02:44.003869 6385 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1121 19:02:44.003878 6385 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1121 19:02:44.003925 6385 factory.go:656] Stopping watch factory\\\\nI1121 19:02:44.003947 6385 ovnkube.go:599] Stopped ovnkube\\\\nI1121 19:02:44.003948 6385 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1121 19:02:44.004011 6385 handler.go:208] Removed *v1.Node event handler 
2\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:43Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-zzdxm_openshift-ovn-kubernetes(cd6417be-62d7-4b6a-9711-a89211dca42e)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zzdxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:45Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.639118 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e70a068b-c06b-4ffe-8496-6f55c321d614\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b6a8b6f55f76ffe5d5f9997137285e639ae17fda481325198a8561d79393480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e81ee034439f66ef1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-tbszf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:45Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.654627 4701 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-n6w8v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"57baab98-95f2-4dff-94ff-a296ffe8a418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fec1e2980b38c9ac8c023bc67c0c4a17c7a7e47d88a78ffa6e72562410d0131e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clkbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-n6w8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:45Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.673829 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:45Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.694926 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:45Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.712273 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hb64h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c769c5d1-60d9-43e1-b130-4373c7eae670\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07ec62beec4f7c4a8cc1504df02a84665027e8c508d74022202e41f529ef9d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rz7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hb64h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:45Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.716359 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.716421 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.716439 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.716465 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.716483 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:45Z","lastTransitionTime":"2025-11-21T19:02:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.736368 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef2e819e1fcd8a87514d3018af92b78391cf38e5bbd149125674b3a09c9ec791\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf687ba9
996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/e
ntrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xxkwp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:45Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.760528 4701 
status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kf9jq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2eababf7-b5d3-4479-9ad5-f1060898f324\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5hzjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\
\"}}\" for pod \"openshift-multus\"/\"multus-kf9jq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:45Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.781566 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-11-21T19:02:45Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.800808 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:45Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.819474 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-q5n7s" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"73831ccf-a071-4135-b8bf-ee1b9b3c2cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n54wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n54wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:29Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-q5n7s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:45Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.820271 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.820354 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.820376 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.820409 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.820437 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:45Z","lastTransitionTime":"2025-11-21T19:02:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.910120 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/73831ccf-a071-4135-b8bf-ee1b9b3c2cd1-metrics-certs\") pod \"network-metrics-daemon-q5n7s\" (UID: \"73831ccf-a071-4135-b8bf-ee1b9b3c2cd1\") " pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:02:45 crc kubenswrapper[4701]: E1121 19:02:45.910474 4701 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 21 19:02:45 crc kubenswrapper[4701]: E1121 19:02:45.910700 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/73831ccf-a071-4135-b8bf-ee1b9b3c2cd1-metrics-certs podName:73831ccf-a071-4135-b8bf-ee1b9b3c2cd1 nodeName:}" failed. No retries permitted until 2025-11-21 19:03:01.910647057 +0000 UTC m=+72.695787244 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/73831ccf-a071-4135-b8bf-ee1b9b3c2cd1-metrics-certs") pod "network-metrics-daemon-q5n7s" (UID: "73831ccf-a071-4135-b8bf-ee1b9b3c2cd1") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.923648 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.923719 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.923742 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.923766 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.923784 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:45Z","lastTransitionTime":"2025-11-21T19:02:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.951709 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:02:45 crc kubenswrapper[4701]: E1121 19:02:45.951908 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.952259 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:02:45 crc kubenswrapper[4701]: E1121 19:02:45.952371 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q5n7s" podUID="73831ccf-a071-4135-b8bf-ee1b9b3c2cd1" Nov 21 19:02:45 crc kubenswrapper[4701]: I1121 19:02:45.952812 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:45 crc kubenswrapper[4701]: E1121 19:02:45.952928 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.026661 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.026741 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.026764 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.026797 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.026824 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:46Z","lastTransitionTime":"2025-11-21T19:02:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.075460 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.075529 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.075553 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.075584 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.075611 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:46Z","lastTransitionTime":"2025-11-21T19:02:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:46 crc kubenswrapper[4701]: E1121 19:02:46.096727 4701 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3bda9678-f6a5-4de4-acaa-3527a0be80fa\\\",\\\"systemUUID\\\":\\\"5ab738c4-0d34-41bd-a531-77773953d838\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:46Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.102645 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.102759 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.102780 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.102808 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.102826 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:46Z","lastTransitionTime":"2025-11-21T19:02:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:46 crc kubenswrapper[4701]: E1121 19:02:46.126763 4701 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3bda9678-f6a5-4de4-acaa-3527a0be80fa\\\",\\\"systemUUID\\\":\\\"5ab738c4-0d34-41bd-a531-77773953d838\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:46Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.132105 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.132175 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.132230 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.132267 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.132292 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:46Z","lastTransitionTime":"2025-11-21T19:02:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:46 crc kubenswrapper[4701]: E1121 19:02:46.154347 4701 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3bda9678-f6a5-4de4-acaa-3527a0be80fa\\\",\\\"systemUUID\\\":\\\"5ab738c4-0d34-41bd-a531-77773953d838\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:46Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.163059 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.163122 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.163140 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.163166 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.163184 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:46Z","lastTransitionTime":"2025-11-21T19:02:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:46 crc kubenswrapper[4701]: E1121 19:02:46.184535 4701 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3bda9678-f6a5-4de4-acaa-3527a0be80fa\\\",\\\"systemUUID\\\":\\\"5ab738c4-0d34-41bd-a531-77773953d838\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:46Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.195255 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.195353 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.195379 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.195412 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.195436 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:46Z","lastTransitionTime":"2025-11-21T19:02:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:46 crc kubenswrapper[4701]: E1121 19:02:46.216160 4701 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3bda9678-f6a5-4de4-acaa-3527a0be80fa\\\",\\\"systemUUID\\\":\\\"5ab738c4-0d34-41bd-a531-77773953d838\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:46Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:46 crc kubenswrapper[4701]: E1121 19:02:46.216433 4701 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.219299 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.219363 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.219391 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.219422 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.219445 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:46Z","lastTransitionTime":"2025-11-21T19:02:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.321970 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.322039 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.322062 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.322122 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.322146 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:46Z","lastTransitionTime":"2025-11-21T19:02:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.425315 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.425379 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.425401 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.425425 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.425443 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:46Z","lastTransitionTime":"2025-11-21T19:02:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.528193 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.528285 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.528457 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.528484 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.528502 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:46Z","lastTransitionTime":"2025-11-21T19:02:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.632027 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.632098 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.632116 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.632146 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.632166 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:46Z","lastTransitionTime":"2025-11-21T19:02:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.735306 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.735373 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.735390 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.735416 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.735434 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:46Z","lastTransitionTime":"2025-11-21T19:02:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.838801 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.838859 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.838876 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.838899 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.838916 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:46Z","lastTransitionTime":"2025-11-21T19:02:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.941899 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.941986 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.942008 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.942040 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.942063 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:46Z","lastTransitionTime":"2025-11-21T19:02:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:46 crc kubenswrapper[4701]: I1121 19:02:46.950344 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:02:46 crc kubenswrapper[4701]: E1121 19:02:46.950564 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.045032 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.045137 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.045161 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.045188 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.045245 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:47Z","lastTransitionTime":"2025-11-21T19:02:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.148402 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.148478 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.148530 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.148563 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.148584 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:47Z","lastTransitionTime":"2025-11-21T19:02:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.252244 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.252329 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.252353 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.252385 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.252411 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:47Z","lastTransitionTime":"2025-11-21T19:02:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.355827 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.355909 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.355930 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.355954 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.355973 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:47Z","lastTransitionTime":"2025-11-21T19:02:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.459661 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.459741 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.459759 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.459782 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.459799 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:47Z","lastTransitionTime":"2025-11-21T19:02:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.563609 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.563739 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.563761 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.563784 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.563803 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:47Z","lastTransitionTime":"2025-11-21T19:02:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.666715 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.666772 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.666788 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.666814 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.666831 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:47Z","lastTransitionTime":"2025-11-21T19:02:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.769665 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.769721 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.769738 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.769758 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.769776 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:47Z","lastTransitionTime":"2025-11-21T19:02:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.873159 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.873277 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.873307 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.873335 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.873357 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:47Z","lastTransitionTime":"2025-11-21T19:02:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.950019 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.950081 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.950090 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:02:47 crc kubenswrapper[4701]: E1121 19:02:47.950271 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 19:02:47 crc kubenswrapper[4701]: E1121 19:02:47.950433 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q5n7s" podUID="73831ccf-a071-4135-b8bf-ee1b9b3c2cd1" Nov 21 19:02:47 crc kubenswrapper[4701]: E1121 19:02:47.950605 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.975839 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.975916 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.975939 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.975969 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:47 crc kubenswrapper[4701]: I1121 19:02:47.975992 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:47Z","lastTransitionTime":"2025-11-21T19:02:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.080580 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.080654 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.080681 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.080728 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.080752 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:48Z","lastTransitionTime":"2025-11-21T19:02:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.184420 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.184486 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.184503 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.184527 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.184545 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:48Z","lastTransitionTime":"2025-11-21T19:02:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.287635 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.287705 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.287724 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.287750 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.287768 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:48Z","lastTransitionTime":"2025-11-21T19:02:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.390507 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.390593 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.390617 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.390649 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.390672 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:48Z","lastTransitionTime":"2025-11-21T19:02:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.493364 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.493426 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.493445 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.493468 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.493485 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:48Z","lastTransitionTime":"2025-11-21T19:02:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.597050 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.597101 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.597117 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.597141 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.597161 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:48Z","lastTransitionTime":"2025-11-21T19:02:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.700472 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.700571 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.700590 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.700613 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.700631 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:48Z","lastTransitionTime":"2025-11-21T19:02:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.804851 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.804967 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.804992 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.805023 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.805045 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:48Z","lastTransitionTime":"2025-11-21T19:02:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.907538 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.907616 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.907635 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.907660 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.907678 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:48Z","lastTransitionTime":"2025-11-21T19:02:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:48 crc kubenswrapper[4701]: I1121 19:02:48.950989 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:02:48 crc kubenswrapper[4701]: E1121 19:02:48.951240 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.010496 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.010551 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.010568 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.010591 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.010609 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:49Z","lastTransitionTime":"2025-11-21T19:02:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.113617 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.113668 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.113682 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.113703 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.113718 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:49Z","lastTransitionTime":"2025-11-21T19:02:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.217647 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.217709 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.217729 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.217758 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.217779 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:49Z","lastTransitionTime":"2025-11-21T19:02:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.321256 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.321326 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.321347 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.321371 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.321389 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:49Z","lastTransitionTime":"2025-11-21T19:02:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.424291 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.424388 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.424405 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.424432 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.424452 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:49Z","lastTransitionTime":"2025-11-21T19:02:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.526791 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.526836 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.526848 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.526865 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.526877 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:49Z","lastTransitionTime":"2025-11-21T19:02:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.629167 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.629277 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.629301 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.629328 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.629350 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:49Z","lastTransitionTime":"2025-11-21T19:02:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.732746 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.732815 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.732833 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.732864 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.732886 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:49Z","lastTransitionTime":"2025-11-21T19:02:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.835368 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.835455 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.835474 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.835497 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.835513 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:49Z","lastTransitionTime":"2025-11-21T19:02:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.938580 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.938898 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.938917 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.938941 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.938975 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:49Z","lastTransitionTime":"2025-11-21T19:02:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.950380 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.950403 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:02:49 crc kubenswrapper[4701]: E1121 19:02:49.950588 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.950621 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:49 crc kubenswrapper[4701]: E1121 19:02:49.950783 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q5n7s" podUID="73831ccf-a071-4135-b8bf-ee1b9b3c2cd1" Nov 21 19:02:49 crc kubenswrapper[4701]: E1121 19:02:49.950886 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.976041 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\"
:0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:49Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:49 crc kubenswrapper[4701]: I1121 19:02:49.992986 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"45e6862c-1f97-44f6-bae7-1f3bcb8a6671\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ff2d56a6e954338aa40e9ccacf6ea72f2dd1e66810cca1441497352ae855378\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:49Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.007050 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2qmlx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1a640e57-40c2-4e96-829d-c2ace468c63c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://296a1518b4535368c4d631d1a420e04ef8ce67c3595ad77c9f53089da1c82a49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gkqmt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f295d8d57e61
a4a96a2973f461a7bafa92b43432b65abe88e0edddff5be8db6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gkqmt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2qmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:50Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.021047 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e70a068b-c06b-4ffe-8496-6f55c321d614\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b6a8b6f55f76ffe5d5f9997137285e639ae17fda481325198a8561d79393480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e81ee034439f66ef1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-tbszf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:50Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.035642 4701 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-n6w8v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"57baab98-95f2-4dff-94ff-a296ffe8a418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fec1e2980b38c9ac8c023bc67c0c4a17c7a7e47d88a78ffa6e72562410d0131e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clkbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-n6w8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:50Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.041265 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.041325 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.041341 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.041364 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.041382 4701 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:50Z","lastTransitionTime":"2025-11-21T19:02:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.052940 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f83b02e5-a459-4898-8c7b-3366362593bf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa3eaa10a4f5d97d32beb3ae165c5482920354befe8671ae1012310f0cc51216\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0492e473455f1ff5bbac682a6cb03808c7acabceac976f63c5921134406f901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b3017c15a14fa52dd2d07aedbee017fee93c6d95d96992386779de9bdbbd7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controlle
r\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://09e96a898422e0d1c9e6adcd100761bf262b000ec85367141f258f5a76fd606e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://09e96a898422e0d1c9e6adcd100761bf262b000ec85367141f258f5a76fd606e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:50Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.071339 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:50Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.090501 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:50Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.118137 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd6417be-62d7-4b6a-9711-a89211dca42e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad576a4992adab10688e96fc25b505ad62fcb76a
61f49f2c980efbd229127d63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T19:02:44Z\\\",\\\"message\\\":\\\"oller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1121 19:02:44.003721 6385 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1121 19:02:44.003749 6385 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1121 19:02:44.003769 6385 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1121 19:02:44.003793 6385 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1121 19:02:44.003797 6385 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1121 19:02:44.003812 6385 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1121 19:02:44.003824 6385 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1121 19:02:44.003829 6385 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1121 19:02:44.003857 6385 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1121 19:02:44.003869 6385 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1121 19:02:44.003878 6385 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1121 19:02:44.003925 6385 factory.go:656] Stopping watch factory\\\\nI1121 19:02:44.003947 6385 ovnkube.go:599] Stopped ovnkube\\\\nI1121 19:02:44.003948 6385 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1121 19:02:44.004011 6385 handler.go:208] Removed *v1.Node event handler 2\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:43Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-zzdxm_openshift-ovn-kubernetes(cd6417be-62d7-4b6a-9711-a89211dca42e)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zzdxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:50Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.135362 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:50Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.144154 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.144235 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.144252 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.144275 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.144293 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:50Z","lastTransitionTime":"2025-11-21T19:02:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.153407 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:50Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.166669 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hb64h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c769c5d1-60d9-43e1-b130-4373c7eae670\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07ec62beec4f7c4a8cc1504df02a84665027e8c508d74022202e41f529ef9d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rz7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hb64h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:50Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.191736 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef2e819e1fcd8a87514d3018af92b78391cf38e5bbd149125674b3a09c9ec791\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xxkwp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:50Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.210013 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kf9jq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2eababf7-b5d3-4479-9ad5-f1060898f324\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5hzjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kf9jq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:50Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.229241 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:50Z is after 
2025-08-24T17:21:41Z" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.244728 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:50Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.247027 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.247090 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.247109 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.247133 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.247152 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:50Z","lastTransitionTime":"2025-11-21T19:02:50Z","reason":"KubeletNotReady","message":"container runtime network not 
ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.259873 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-q5n7s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73831ccf-a071-4135-b8bf-ee1b9b3c2cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n54wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n54wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:29Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-q5n7s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:50Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.349803 4701 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.349885 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.349909 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.349937 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.349959 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:50Z","lastTransitionTime":"2025-11-21T19:02:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.452965 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.453043 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.453069 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.453102 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.453125 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:50Z","lastTransitionTime":"2025-11-21T19:02:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.556541 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.556602 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.556620 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.556643 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.556659 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:50Z","lastTransitionTime":"2025-11-21T19:02:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.660400 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.660464 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.660481 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.660509 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.660526 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:50Z","lastTransitionTime":"2025-11-21T19:02:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.763766 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.763831 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.763853 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.763883 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.763906 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:50Z","lastTransitionTime":"2025-11-21T19:02:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.867446 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.867531 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.867554 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.867586 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.867607 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:50Z","lastTransitionTime":"2025-11-21T19:02:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.950768 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:02:50 crc kubenswrapper[4701]: E1121 19:02:50.951080 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.970392 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.970441 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.970455 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.970473 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:50 crc kubenswrapper[4701]: I1121 19:02:50.970484 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:50Z","lastTransitionTime":"2025-11-21T19:02:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.073908 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.073974 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.073992 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.074021 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.074042 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:51Z","lastTransitionTime":"2025-11-21T19:02:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.177556 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.177638 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.177659 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.177684 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.177701 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:51Z","lastTransitionTime":"2025-11-21T19:02:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.280286 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.280359 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.280375 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.280397 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.280411 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:51Z","lastTransitionTime":"2025-11-21T19:02:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.383787 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.383857 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.383869 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.383891 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.383905 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:51Z","lastTransitionTime":"2025-11-21T19:02:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.486852 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.486964 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.486983 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.487014 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.487037 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:51Z","lastTransitionTime":"2025-11-21T19:02:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.590106 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.590243 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.590264 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.590291 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.590312 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:51Z","lastTransitionTime":"2025-11-21T19:02:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.695286 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.695367 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.695391 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.695421 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.695453 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:51Z","lastTransitionTime":"2025-11-21T19:02:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.799183 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.799292 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.799310 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.799336 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.799356 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:51Z","lastTransitionTime":"2025-11-21T19:02:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.902807 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.902902 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.902916 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.902938 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.902952 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:51Z","lastTransitionTime":"2025-11-21T19:02:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.950761 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.950851 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:02:51 crc kubenswrapper[4701]: E1121 19:02:51.950966 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 19:02:51 crc kubenswrapper[4701]: I1121 19:02:51.951071 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:02:51 crc kubenswrapper[4701]: E1121 19:02:51.951282 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q5n7s" podUID="73831ccf-a071-4135-b8bf-ee1b9b3c2cd1" Nov 21 19:02:51 crc kubenswrapper[4701]: E1121 19:02:51.951381 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.005559 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.005611 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.005628 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.005652 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.005670 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:52Z","lastTransitionTime":"2025-11-21T19:02:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.108849 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.108930 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.108942 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.108985 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.108998 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:52Z","lastTransitionTime":"2025-11-21T19:02:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.212321 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.212392 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.212412 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.212441 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.212460 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:52Z","lastTransitionTime":"2025-11-21T19:02:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.315535 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.315593 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.315609 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.315632 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.315649 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:52Z","lastTransitionTime":"2025-11-21T19:02:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.418819 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.418892 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.418909 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.418932 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.418951 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:52Z","lastTransitionTime":"2025-11-21T19:02:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.521187 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.521254 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.521265 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.521281 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.521295 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:52Z","lastTransitionTime":"2025-11-21T19:02:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.624748 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.624817 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.624834 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.624858 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.624876 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:52Z","lastTransitionTime":"2025-11-21T19:02:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.728232 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.728300 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.728319 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.728342 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.728360 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:52Z","lastTransitionTime":"2025-11-21T19:02:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.832161 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.832270 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.832298 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.832327 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.832345 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:52Z","lastTransitionTime":"2025-11-21T19:02:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.934793 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.934857 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.934873 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.934895 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.934912 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:52Z","lastTransitionTime":"2025-11-21T19:02:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:52 crc kubenswrapper[4701]: I1121 19:02:52.950128 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:02:52 crc kubenswrapper[4701]: E1121 19:02:52.950341 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.038104 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.038165 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.038183 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.038232 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.038255 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:53Z","lastTransitionTime":"2025-11-21T19:02:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.141797 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.141868 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.141886 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.141910 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.141927 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:53Z","lastTransitionTime":"2025-11-21T19:02:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.244706 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.244800 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.244820 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.244844 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.244862 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:53Z","lastTransitionTime":"2025-11-21T19:02:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.347875 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.347943 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.347959 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.347981 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.347999 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:53Z","lastTransitionTime":"2025-11-21T19:02:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.451967 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.452039 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.452061 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.452092 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.452114 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:53Z","lastTransitionTime":"2025-11-21T19:02:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.555296 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.555355 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.555377 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.555404 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.555428 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:53Z","lastTransitionTime":"2025-11-21T19:02:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.658156 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.658241 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.658258 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.658281 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.658298 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:53Z","lastTransitionTime":"2025-11-21T19:02:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.761654 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.761724 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.761747 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.761776 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.761798 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:53Z","lastTransitionTime":"2025-11-21T19:02:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.864277 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.864336 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.864353 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.864375 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.864391 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:53Z","lastTransitionTime":"2025-11-21T19:02:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.950939 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.950986 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:02:53 crc kubenswrapper[4701]: E1121 19:02:53.951275 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.951313 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:02:53 crc kubenswrapper[4701]: E1121 19:02:53.951510 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q5n7s" podUID="73831ccf-a071-4135-b8bf-ee1b9b3c2cd1" Nov 21 19:02:53 crc kubenswrapper[4701]: E1121 19:02:53.951729 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.967617 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.967650 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.967659 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.967673 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:53 crc kubenswrapper[4701]: I1121 19:02:53.967682 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:53Z","lastTransitionTime":"2025-11-21T19:02:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.070798 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.070862 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.070879 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.070902 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.070920 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:54Z","lastTransitionTime":"2025-11-21T19:02:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.173569 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.173622 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.173641 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.173663 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.173678 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:54Z","lastTransitionTime":"2025-11-21T19:02:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.276144 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.276194 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.276231 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.276247 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.276259 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:54Z","lastTransitionTime":"2025-11-21T19:02:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.378721 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.378767 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.378778 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.378795 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.378806 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:54Z","lastTransitionTime":"2025-11-21T19:02:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.481635 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.481717 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.481729 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.481742 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.481752 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:54Z","lastTransitionTime":"2025-11-21T19:02:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.583713 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.583749 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.583783 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.583799 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.583810 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:54Z","lastTransitionTime":"2025-11-21T19:02:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.686091 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.686154 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.686166 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.686180 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.686189 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:54Z","lastTransitionTime":"2025-11-21T19:02:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.788859 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.788923 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.788938 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.788954 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.788988 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:54Z","lastTransitionTime":"2025-11-21T19:02:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.891054 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.891085 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.891094 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.891109 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.891121 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:54Z","lastTransitionTime":"2025-11-21T19:02:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.950844 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:02:54 crc kubenswrapper[4701]: E1121 19:02:54.950964 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.993809 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.993865 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.993879 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.993893 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:54 crc kubenswrapper[4701]: I1121 19:02:54.993904 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:54Z","lastTransitionTime":"2025-11-21T19:02:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.095370 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.095402 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.095410 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.095422 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.095430 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:55Z","lastTransitionTime":"2025-11-21T19:02:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.198745 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.198781 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.198792 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.198805 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.198816 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:55Z","lastTransitionTime":"2025-11-21T19:02:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.301576 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.301714 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.301727 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.301747 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.301760 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:55Z","lastTransitionTime":"2025-11-21T19:02:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.404921 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.404964 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.404974 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.404990 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.404999 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:55Z","lastTransitionTime":"2025-11-21T19:02:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.507294 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.507347 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.507363 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.507387 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.507405 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:55Z","lastTransitionTime":"2025-11-21T19:02:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.609854 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.609902 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.609920 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.609938 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.609950 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:55Z","lastTransitionTime":"2025-11-21T19:02:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.712745 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.712807 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.712824 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.712847 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.712867 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:55Z","lastTransitionTime":"2025-11-21T19:02:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.814836 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.814878 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.814889 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.814911 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.814923 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:55Z","lastTransitionTime":"2025-11-21T19:02:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.918294 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.918351 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.918368 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.918392 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.918412 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:55Z","lastTransitionTime":"2025-11-21T19:02:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.950686 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.950785 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:02:55 crc kubenswrapper[4701]: E1121 19:02:55.950852 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 19:02:55 crc kubenswrapper[4701]: I1121 19:02:55.950891 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:02:55 crc kubenswrapper[4701]: E1121 19:02:55.951046 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q5n7s" podUID="73831ccf-a071-4135-b8bf-ee1b9b3c2cd1" Nov 21 19:02:55 crc kubenswrapper[4701]: E1121 19:02:55.951116 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.020785 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.020845 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.020863 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.020885 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.020902 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:56Z","lastTransitionTime":"2025-11-21T19:02:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.122983 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.123045 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.123063 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.123087 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.123103 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:56Z","lastTransitionTime":"2025-11-21T19:02:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.226657 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.226725 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.226752 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.226776 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.226793 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:56Z","lastTransitionTime":"2025-11-21T19:02:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.273570 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.273618 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.273634 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.273657 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.273674 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:56Z","lastTransitionTime":"2025-11-21T19:02:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:56 crc kubenswrapper[4701]: E1121 19:02:56.290026 4701 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3bda9678-f6a5-4de4-acaa-3527a0be80fa\\\",\\\"systemUUID\\\":\\\"5ab738c4-0d34-41bd-a531-77773953d838\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:56Z is after 
2025-08-24T17:21:41Z" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.298817 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.298851 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.298859 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.298872 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.298882 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:56Z","lastTransitionTime":"2025-11-21T19:02:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:56 crc kubenswrapper[4701]: E1121 19:02:56.309707 4701 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3bda9678-f6a5-4de4-acaa-3527a0be80fa\\\",\\\"systemUUID\\\":\\\"5ab738c4-0d34-41bd-a531-77773953d838\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:56Z is after 
2025-08-24T17:21:41Z" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.314068 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.314095 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.314102 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.314113 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.314121 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:56Z","lastTransitionTime":"2025-11-21T19:02:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:56 crc kubenswrapper[4701]: E1121 19:02:56.325646 4701 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3bda9678-f6a5-4de4-acaa-3527a0be80fa\\\",\\\"systemUUID\\\":\\\"5ab738c4-0d34-41bd-a531-77773953d838\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:56Z is after 
2025-08-24T17:21:41Z" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.329691 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.329720 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.329732 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.329747 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.329760 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:56Z","lastTransitionTime":"2025-11-21T19:02:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:56 crc kubenswrapper[4701]: E1121 19:02:56.339865 4701 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3bda9678-f6a5-4de4-acaa-3527a0be80fa\\\",\\\"systemUUID\\\":\\\"5ab738c4-0d34-41bd-a531-77773953d838\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:56Z is after 
2025-08-24T17:21:41Z" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.342897 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.342945 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.342961 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.342984 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.343004 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:56Z","lastTransitionTime":"2025-11-21T19:02:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:56 crc kubenswrapper[4701]: E1121 19:02:56.354876 4701 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3bda9678-f6a5-4de4-acaa-3527a0be80fa\\\",\\\"systemUUID\\\":\\\"5ab738c4-0d34-41bd-a531-77773953d838\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:56Z is after 
2025-08-24T17:21:41Z" Nov 21 19:02:56 crc kubenswrapper[4701]: E1121 19:02:56.355090 4701 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.356707 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.356790 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.356811 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.356834 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.356852 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:56Z","lastTransitionTime":"2025-11-21T19:02:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.459185 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.459314 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.459331 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.459357 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.459374 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:56Z","lastTransitionTime":"2025-11-21T19:02:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.562274 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.562333 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.562349 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.562376 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.562396 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:56Z","lastTransitionTime":"2025-11-21T19:02:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.665162 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.665266 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.665288 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.665312 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.665330 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:56Z","lastTransitionTime":"2025-11-21T19:02:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.767563 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.767642 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.767662 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.767695 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.767721 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:56Z","lastTransitionTime":"2025-11-21T19:02:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.870873 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.870936 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.870958 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.870984 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.871006 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:56Z","lastTransitionTime":"2025-11-21T19:02:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.950236 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:02:56 crc kubenswrapper[4701]: E1121 19:02:56.950409 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.973709 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.973739 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.973748 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.973761 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:56 crc kubenswrapper[4701]: I1121 19:02:56.973770 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:56Z","lastTransitionTime":"2025-11-21T19:02:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.075981 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.076018 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.076029 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.076045 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.076055 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:57Z","lastTransitionTime":"2025-11-21T19:02:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.178472 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.178521 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.178535 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.178551 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.178563 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:57Z","lastTransitionTime":"2025-11-21T19:02:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.281239 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.281314 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.281327 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.281341 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.281352 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:57Z","lastTransitionTime":"2025-11-21T19:02:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.383813 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.383882 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.383898 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.383919 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.383936 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:57Z","lastTransitionTime":"2025-11-21T19:02:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.486316 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.486379 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.486400 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.486428 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.486449 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:57Z","lastTransitionTime":"2025-11-21T19:02:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.589362 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.589425 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.589449 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.589477 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.589498 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:57Z","lastTransitionTime":"2025-11-21T19:02:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.692369 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.692403 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.692411 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.692423 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.692433 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:57Z","lastTransitionTime":"2025-11-21T19:02:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.795089 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.795144 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.795161 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.795185 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.795229 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:57Z","lastTransitionTime":"2025-11-21T19:02:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.897645 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.897744 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.897777 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.897804 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.897822 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:57Z","lastTransitionTime":"2025-11-21T19:02:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.950369 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.950455 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:02:57 crc kubenswrapper[4701]: I1121 19:02:57.950385 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:02:57 crc kubenswrapper[4701]: E1121 19:02:57.950562 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 19:02:57 crc kubenswrapper[4701]: E1121 19:02:57.950693 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q5n7s" podUID="73831ccf-a071-4135-b8bf-ee1b9b3c2cd1" Nov 21 19:02:57 crc kubenswrapper[4701]: E1121 19:02:57.950816 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.000904 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.000963 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.000978 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.001001 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.001019 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:58Z","lastTransitionTime":"2025-11-21T19:02:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.105618 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.105729 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.105743 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.105762 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.105777 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:58Z","lastTransitionTime":"2025-11-21T19:02:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.209770 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.209808 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.209818 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.209834 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.209850 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:58Z","lastTransitionTime":"2025-11-21T19:02:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.313101 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.313172 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.313232 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.313265 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.313289 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:58Z","lastTransitionTime":"2025-11-21T19:02:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.415981 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.416020 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.416030 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.416045 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.416055 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:58Z","lastTransitionTime":"2025-11-21T19:02:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.518719 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.518872 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.518942 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.518971 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.518988 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:58Z","lastTransitionTime":"2025-11-21T19:02:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.621715 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.621760 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.621768 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.621782 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.621793 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:58Z","lastTransitionTime":"2025-11-21T19:02:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.725006 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.725075 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.725091 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.725115 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.725137 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:58Z","lastTransitionTime":"2025-11-21T19:02:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.828564 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.828627 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.828644 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.828668 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.828685 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:58Z","lastTransitionTime":"2025-11-21T19:02:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.931520 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.931581 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.931599 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.931623 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.931642 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:58Z","lastTransitionTime":"2025-11-21T19:02:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:58 crc kubenswrapper[4701]: I1121 19:02:58.950841 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:02:58 crc kubenswrapper[4701]: E1121 19:02:58.951023 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.034938 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.035013 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.035031 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.035056 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.035074 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:59Z","lastTransitionTime":"2025-11-21T19:02:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.139187 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.139249 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.139258 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.139270 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.139279 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:59Z","lastTransitionTime":"2025-11-21T19:02:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.242508 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.242558 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.242574 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.242596 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.242613 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:59Z","lastTransitionTime":"2025-11-21T19:02:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.345897 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.345959 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.345975 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.345998 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.346015 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:59Z","lastTransitionTime":"2025-11-21T19:02:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.449111 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.449160 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.449177 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.449231 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.449249 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:59Z","lastTransitionTime":"2025-11-21T19:02:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.552510 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.552554 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.552565 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.552582 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.552593 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:59Z","lastTransitionTime":"2025-11-21T19:02:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.655337 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.655389 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.655403 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.655420 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.655432 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:59Z","lastTransitionTime":"2025-11-21T19:02:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.757790 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.757867 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.757879 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.757895 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.757904 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:59Z","lastTransitionTime":"2025-11-21T19:02:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.861148 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.861247 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.861265 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.861289 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.861310 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:59Z","lastTransitionTime":"2025-11-21T19:02:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.951163 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:02:59 crc kubenswrapper[4701]: E1121 19:02:59.951376 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.951646 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:02:59 crc kubenswrapper[4701]: E1121 19:02:59.951733 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.953036 4701 scope.go:117] "RemoveContainer" containerID="ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63" Nov 21 19:02:59 crc kubenswrapper[4701]: E1121 19:02:59.953405 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-zzdxm_openshift-ovn-kubernetes(cd6417be-62d7-4b6a-9711-a89211dca42e)\"" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.953770 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:02:59 crc kubenswrapper[4701]: E1121 19:02:59.953922 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q5n7s" podUID="73831ccf-a071-4135-b8bf-ee1b9b3c2cd1" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.964665 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.964701 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.964709 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.964722 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.964732 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:02:59Z","lastTransitionTime":"2025-11-21T19:02:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.970358 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\
\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:59Z is after 2025-08-24T17:21:41Z" Nov 21 19:02:59 crc kubenswrapper[4701]: I1121 19:02:59.987543 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45e6862c-1f97-44f6-bae7-1f3bcb8a6671\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"moun
tPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ff2d56a6e954338aa40e9ccacf6ea72f2dd1e66810cca1441497352ae855378\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:02:59Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.004891 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2qmlx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1a640e57-40c2-4e96-829d-c2ace468c63c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://296a1518b4535368c4d631d1a420e04ef8ce67c3595ad77c9f53089da1c82a49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gkqmt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f295d8d57e61a4a96a2973f461a7bafa92b43432b65abe88e0edddff5be8db6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gkqmt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2qmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:00Z is after 2025-08-24T17:21:41Z" Nov 21 
19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.020908 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f83b02e5-a459-4898-8c7b-3366362593bf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa3eaa10a4f5d97d32beb3ae165c5482920354befe8671ae1012310f0cc51216\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0492e473455f1ff5bbac682a6cb03808c7acabceac976f63c5921134406f901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b3017c15a14fa52dd2d07aedbee017fee93c6d95d96992386779de9bdbbd7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.
126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://09e96a898422e0d1c9e6adcd100761bf262b000ec85367141f258f5a76fd606e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://09e96a898422e0d1c9e6adcd100761bf262b000ec85367141f258f5a76fd606e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:00Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.040464 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:00Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.061963 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:00Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.067469 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.067514 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.067525 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.067544 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.067558 4701 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:00Z","lastTransitionTime":"2025-11-21T19:03:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.093065 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd6417be-62d7-4b6a-9711-a89211dca42e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\
":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access
-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T19:02:44Z\\\",\\\"message\\\":\\\"oller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1121 19:02:44.003721 6385 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1121 19:02:44.003749 6385 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1121 19:02:44.003769 6385 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1121 19:02:44.003793 6385 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1121 19:02:44.003797 6385 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1121 19:02:44.003812 6385 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1121 19:02:44.003824 6385 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1121 19:02:44.003829 6385 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1121 19:02:44.003857 6385 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1121 19:02:44.003869 6385 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1121 19:02:44.003878 6385 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1121 19:02:44.003925 6385 factory.go:656] Stopping watch factory\\\\nI1121 19:02:44.003947 6385 ovnkube.go:599] Stopped ovnkube\\\\nI1121 19:02:44.003948 6385 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1121 19:02:44.004011 6385 handler.go:208] Removed *v1.Node event handler 
2\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:43Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-zzdxm_openshift-ovn-kubernetes(cd6417be-62d7-4b6a-9711-a89211dca42e)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zzdxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:00Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.109935 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e70a068b-c06b-4ffe-8496-6f55c321d614\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b6a8b6f55f76ffe5d5f9997137285e639ae17fda481325198a8561d79393480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e81ee034439f66ef1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-tbszf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:00Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.125687 4701 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-n6w8v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"57baab98-95f2-4dff-94ff-a296ffe8a418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fec1e2980b38c9ac8c023bc67c0c4a17c7a7e47d88a78ffa6e72562410d0131e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clkbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-n6w8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:00Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.140413 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:00Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.153609 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:00Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.168857 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hb64h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c769c5d1-60d9-43e1-b130-4373c7eae670\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07ec62beec4f7c4a8cc1504df02a84665027e8c508d74022202e41f529ef9d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rz7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hb64h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:00Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.170799 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.170843 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.170861 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.170886 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.170903 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:00Z","lastTransitionTime":"2025-11-21T19:03:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.185733 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef2e819e1fcd8a87514d3018af92b78391cf38e5bbd149125674b3a09c9ec791\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf687ba9
996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/e
ntrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xxkwp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:00Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.201285 4701 
status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kf9jq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2eababf7-b5d3-4479-9ad5-f1060898f324\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5hzjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\
\"}}\" for pod \"openshift-multus\"/\"multus-kf9jq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:00Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.218060 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-11-21T19:03:00Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.230654 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:00Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.244865 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-q5n7s" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"73831ccf-a071-4135-b8bf-ee1b9b3c2cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n54wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n54wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:29Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-q5n7s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:00Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.274315 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.274368 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.274384 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.274411 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.274430 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:00Z","lastTransitionTime":"2025-11-21T19:03:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.377526 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.378388 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.378435 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.378464 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.378484 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:00Z","lastTransitionTime":"2025-11-21T19:03:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.481531 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.481578 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.481590 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.481606 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.481618 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:00Z","lastTransitionTime":"2025-11-21T19:03:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.584911 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.584959 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.584976 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.584998 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.585014 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:00Z","lastTransitionTime":"2025-11-21T19:03:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.687957 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.688021 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.688059 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.688089 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.688110 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:00Z","lastTransitionTime":"2025-11-21T19:03:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.791416 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.791452 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.791461 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.791474 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.791483 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:00Z","lastTransitionTime":"2025-11-21T19:03:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.894627 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.894702 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.894727 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.894755 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.894776 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:00Z","lastTransitionTime":"2025-11-21T19:03:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.950653 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:03:00 crc kubenswrapper[4701]: E1121 19:03:00.950842 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.997140 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.997244 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.997268 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.997301 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:00 crc kubenswrapper[4701]: I1121 19:03:00.997324 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:00Z","lastTransitionTime":"2025-11-21T19:03:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.100638 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.100698 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.100715 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.100740 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.100757 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:01Z","lastTransitionTime":"2025-11-21T19:03:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.204023 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.204097 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.204120 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.204148 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.204170 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:01Z","lastTransitionTime":"2025-11-21T19:03:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.307081 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.307129 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.307140 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.307156 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.307167 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:01Z","lastTransitionTime":"2025-11-21T19:03:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.408957 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.409081 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.409123 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.409143 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.409188 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:01Z","lastTransitionTime":"2025-11-21T19:03:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.511521 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.511558 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.511566 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.511580 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.511591 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:01Z","lastTransitionTime":"2025-11-21T19:03:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.614440 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.614494 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.614509 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.614532 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.614549 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:01Z","lastTransitionTime":"2025-11-21T19:03:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.717029 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.717099 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.717115 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.717140 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.717159 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:01Z","lastTransitionTime":"2025-11-21T19:03:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.819995 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.820049 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.820067 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.820090 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.820107 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:01Z","lastTransitionTime":"2025-11-21T19:03:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.923063 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.923113 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.923126 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.923144 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.923157 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:01Z","lastTransitionTime":"2025-11-21T19:03:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.952552 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.952578 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:03:01 crc kubenswrapper[4701]: E1121 19:03:01.952767 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.952877 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:03:01 crc kubenswrapper[4701]: E1121 19:03:01.953012 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q5n7s" podUID="73831ccf-a071-4135-b8bf-ee1b9b3c2cd1" Nov 21 19:03:01 crc kubenswrapper[4701]: E1121 19:03:01.953464 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 19:03:01 crc kubenswrapper[4701]: I1121 19:03:01.990944 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/73831ccf-a071-4135-b8bf-ee1b9b3c2cd1-metrics-certs\") pod \"network-metrics-daemon-q5n7s\" (UID: \"73831ccf-a071-4135-b8bf-ee1b9b3c2cd1\") " pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:03:01 crc kubenswrapper[4701]: E1121 19:03:01.991268 4701 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 21 19:03:01 crc kubenswrapper[4701]: E1121 19:03:01.991363 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/73831ccf-a071-4135-b8bf-ee1b9b3c2cd1-metrics-certs podName:73831ccf-a071-4135-b8bf-ee1b9b3c2cd1 nodeName:}" failed. No retries permitted until 2025-11-21 19:03:33.991330274 +0000 UTC m=+104.776470341 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/73831ccf-a071-4135-b8bf-ee1b9b3c2cd1-metrics-certs") pod "network-metrics-daemon-q5n7s" (UID: "73831ccf-a071-4135-b8bf-ee1b9b3c2cd1") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.025767 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.025808 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.025821 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.025838 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.025850 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:02Z","lastTransitionTime":"2025-11-21T19:03:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.128339 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.128408 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.128425 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.128450 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.128468 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:02Z","lastTransitionTime":"2025-11-21T19:03:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.231970 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.232035 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.232053 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.232079 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.232099 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:02Z","lastTransitionTime":"2025-11-21T19:03:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.334936 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.335003 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.335032 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.335064 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.335087 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:02Z","lastTransitionTime":"2025-11-21T19:03:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.437096 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.437174 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.437231 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.437264 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.437287 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:02Z","lastTransitionTime":"2025-11-21T19:03:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.539995 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.540051 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.540059 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.540074 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.540084 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:02Z","lastTransitionTime":"2025-11-21T19:03:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.642271 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.642329 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.642350 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.642379 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.642404 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:02Z","lastTransitionTime":"2025-11-21T19:03:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.745029 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.745097 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.745114 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.745139 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.745159 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:02Z","lastTransitionTime":"2025-11-21T19:03:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.847478 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.847554 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.847581 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.847612 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.847633 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:02Z","lastTransitionTime":"2025-11-21T19:03:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.949884 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:03:02 crc kubenswrapper[4701]: E1121 19:03:02.950105 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.950432 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.950534 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.950552 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.950574 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:02 crc kubenswrapper[4701]: I1121 19:03:02.950591 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:02Z","lastTransitionTime":"2025-11-21T19:03:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.054310 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.054370 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.054387 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.054411 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.054428 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:03Z","lastTransitionTime":"2025-11-21T19:03:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.157667 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.157713 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.157725 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.157743 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.157756 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:03Z","lastTransitionTime":"2025-11-21T19:03:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.260545 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.260611 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.260631 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.260655 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.260673 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:03Z","lastTransitionTime":"2025-11-21T19:03:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.364011 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.364094 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.364119 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.364148 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.364168 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:03Z","lastTransitionTime":"2025-11-21T19:03:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.466842 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.466899 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.466915 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.466938 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.466955 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:03Z","lastTransitionTime":"2025-11-21T19:03:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.539692 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kf9jq_2eababf7-b5d3-4479-9ad5-f1060898f324/kube-multus/0.log" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.539765 4701 generic.go:334] "Generic (PLEG): container finished" podID="2eababf7-b5d3-4479-9ad5-f1060898f324" containerID="afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456" exitCode=1 Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.539804 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-kf9jq" event={"ID":"2eababf7-b5d3-4479-9ad5-f1060898f324","Type":"ContainerDied","Data":"afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456"} Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.540325 4701 scope.go:117] "RemoveContainer" containerID="afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.558805 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/en
v\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:03Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.570421 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.570465 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.570483 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.570506 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.570522 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:03Z","lastTransitionTime":"2025-11-21T19:03:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.579194 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:03Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.595738 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-q5n7s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73831ccf-a071-4135-b8bf-ee1b9b3c2cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n54wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n54wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:29Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-q5n7s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:03Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.620036 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\
\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:03Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.641181 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45e6862c-1f97-44f6-bae7-1f3bcb8a6671\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"moun
tPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ff2d56a6e954338aa40e9ccacf6ea72f2dd1e66810cca1441497352ae855378\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:03Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.656816 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2qmlx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1a640e57-40c2-4e96-829d-c2ace468c63c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://296a1518b4535368c4d631d1a420e04ef8ce67c3595ad77c9f53089da1c82a49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gkqmt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f295d8d57e61a4a96a2973f461a7bafa92b43432b65abe88e0edddff5be8db6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gkqmt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2qmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:03Z is after 2025-08-24T17:21:41Z" Nov 21 
19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.682109 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.682184 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.682231 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.682266 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.682300 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:03Z","lastTransitionTime":"2025-11-21T19:03:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.692644 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd6417be-62d7-4b6a-9711-a89211dca42e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad576a4992adab10688e96fc25b505ad62fcb76a
61f49f2c980efbd229127d63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T19:02:44Z\\\",\\\"message\\\":\\\"oller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1121 19:02:44.003721 6385 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1121 19:02:44.003749 6385 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1121 19:02:44.003769 6385 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1121 19:02:44.003793 6385 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1121 19:02:44.003797 6385 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1121 19:02:44.003812 6385 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1121 19:02:44.003824 6385 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1121 19:02:44.003829 6385 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1121 19:02:44.003857 6385 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1121 19:02:44.003869 6385 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1121 19:02:44.003878 6385 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1121 19:02:44.003925 6385 factory.go:656] Stopping watch factory\\\\nI1121 19:02:44.003947 6385 ovnkube.go:599] Stopped ovnkube\\\\nI1121 19:02:44.003948 6385 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1121 19:02:44.004011 6385 handler.go:208] Removed *v1.Node event handler 2\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:43Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-zzdxm_openshift-ovn-kubernetes(cd6417be-62d7-4b6a-9711-a89211dca42e)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zzdxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:03Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.709875 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e70a068b-c06b-4ffe-8496-6f55c321d614\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b6a8b6f55f76ffe5d5f9997137285e639ae17fda481325198a8561d79393480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",
\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e81ee034439f66ef1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-tbszf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:03Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.726288 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-n6w8v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"57baab98-95f2-4dff-94ff-a296ffe8a418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fec1e2980b38c9ac8c023bc67c0c4a17c7a7e47d88a78ffa6e72562410d0131e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clkbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-n6w8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:03Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.743838 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f83b02e5-a459-4898-8c7b-3366362593bf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa3eaa10a4f5d97d32beb3ae165c5482920354befe8671ae1012310f0cc51216\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0492e473455f1ff5bbac682a6cb03808c7acabceac976f63c5921134406f901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b3017c15a14fa52dd2d07aedbee017fee93c6d95d96992386779de9bdbbd7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://09e96a898422e0d1c9e6adcd100761bf262b000ec85367141f258f5a76fd606e\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://09e96a898422e0d1c9e6adcd100761bf262b000ec85367141f258f5a76fd606e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:03Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.764772 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:03Z is after 
2025-08-24T17:21:41Z" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.781914 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:03Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.786771 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.786809 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.786819 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.786833 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.786844 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:03Z","lastTransitionTime":"2025-11-21T19:03:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.803354 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kf9jq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2eababf7-b5d3-4479-9ad5-f1060898f324\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:03:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:03:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T19:03:02Z\\\",\\\"message\\\":\\\"2025-11-21T19:02:17+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_b61266e2-9125-4c3b-b893-41381319a94d\\\\n2025-11-21T19:02:17+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_b61266e2-9125-4c3b-b893-41381319a94d to /host/opt/cni/bin/\\\\n2025-11-21T19:02:17Z [verbose] multus-daemon started\\\\n2025-11-21T19:02:17Z [verbose] Readiness Indicator file check\\\\n2025-11-21T19:03:02Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5hzjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kf9jq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:03Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.821713 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:03Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.839879 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:03Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.855676 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hb64h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c769c5d1-60d9-43e1-b130-4373c7eae670\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07ec62beec4f7c4a8cc1504df02a84665027e8c508d74022202e41f529ef9d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rz7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hb64h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:03Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.880091 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef2e819e1fcd8a87514d3018af92b78391cf38e5bbd149125674b3a09c9ec791\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xxkwp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:03Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.889121 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.889162 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:03 crc 
kubenswrapper[4701]: I1121 19:03:03.889171 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.889186 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.889213 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:03Z","lastTransitionTime":"2025-11-21T19:03:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.950878 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.951016 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.951153 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:03:03 crc kubenswrapper[4701]: E1121 19:03:03.951037 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 19:03:03 crc kubenswrapper[4701]: E1121 19:03:03.951295 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q5n7s" podUID="73831ccf-a071-4135-b8bf-ee1b9b3c2cd1" Nov 21 19:03:03 crc kubenswrapper[4701]: E1121 19:03:03.951454 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.991908 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.992021 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.992051 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.992091 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:03 crc kubenswrapper[4701]: I1121 19:03:03.992117 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:03Z","lastTransitionTime":"2025-11-21T19:03:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.095283 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.095366 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.095392 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.095422 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.095441 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:04Z","lastTransitionTime":"2025-11-21T19:03:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.198865 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.198924 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.198942 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.198967 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.198985 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:04Z","lastTransitionTime":"2025-11-21T19:03:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.302930 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.303042 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.303061 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.303125 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.303145 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:04Z","lastTransitionTime":"2025-11-21T19:03:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.406156 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.406310 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.406422 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.406452 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.406470 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:04Z","lastTransitionTime":"2025-11-21T19:03:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.510029 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.510076 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.510090 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.510110 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.510122 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:04Z","lastTransitionTime":"2025-11-21T19:03:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.546355 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kf9jq_2eababf7-b5d3-4479-9ad5-f1060898f324/kube-multus/0.log" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.546435 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-kf9jq" event={"ID":"2eababf7-b5d3-4479-9ad5-f1060898f324","Type":"ContainerStarted","Data":"d836ae8a24de298a553266b2404ddcc460bc6aa64ccacb4f11b25fe6bf984464"} Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.571274 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kf9jq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2eababf7-b5d3-4479-9ad5-f1060898f324\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:03:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:03:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d836ae8a24de298a553266b2404ddcc460bc6aa64ccacb4f11b25fe6bf984464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T19:03:02Z\\\",\\\"message\\\":\\\"2025-11-21T19:02:17+00:00 [cnibincopy] 
Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_b61266e2-9125-4c3b-b893-41381319a94d\\\\n2025-11-21T19:02:17+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_b61266e2-9125-4c3b-b893-41381319a94d to /host/opt/cni/bin/\\\\n2025-11-21T19:02:17Z [verbose] multus-daemon started\\\\n2025-11-21T19:02:17Z [verbose] Readiness Indicator file check\\\\n2025-11-21T19:03:02Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:03:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5hzjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kf9jq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:04Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.592876 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:04Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.612286 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:04Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.613895 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.613953 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.613972 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.613996 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.614012 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:04Z","lastTransitionTime":"2025-11-21T19:03:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.627484 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hb64h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c769c5d1-60d9-43e1-b130-4373c7eae670\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07ec62beec4f7c4a8cc1504df02a84665027e8c508d74022202e41f529ef9d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rz7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hb64h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:04Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.650689 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef2e819e1fcd8a87514d3018af92b78391cf38e5bbd149125674b3a09c9ec791\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xxkwp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:04Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.672656 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:04Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.693264 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:04Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.709275 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-q5n7s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73831ccf-a071-4135-b8bf-ee1b9b3c2cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"message\\\":\\\"containers 
with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n54wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n54wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:29Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-q5n7s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:04Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.716807 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.716857 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.716874 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.716896 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.716915 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:04Z","lastTransitionTime":"2025-11-21T19:03:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.731300 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:04Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.751332 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"45e6862c-1f97-44f6-bae7-1f3bcb8a6671\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ff2d56a6e954338aa40e9ccacf6ea72f2dd1e66810cca1441497352ae855378\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:04Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.767439 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2qmlx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1a640e57-40c2-4e96-829d-c2ace468c63c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://296a1518b4535368c4d631d1a420e04ef8ce67c3595ad77c9f53089da1c82a49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gkqmt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f295d8d57e61
a4a96a2973f461a7bafa92b43432b65abe88e0edddff5be8db6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gkqmt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2qmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:04Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.797111 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd6417be-62d7-4b6a-9711-a89211dca42e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad576a4992adab10688e96fc25b505ad62fcb76a
61f49f2c980efbd229127d63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T19:02:44Z\\\",\\\"message\\\":\\\"oller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1121 19:02:44.003721 6385 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1121 19:02:44.003749 6385 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1121 19:02:44.003769 6385 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1121 19:02:44.003793 6385 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1121 19:02:44.003797 6385 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1121 19:02:44.003812 6385 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1121 19:02:44.003824 6385 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1121 19:02:44.003829 6385 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1121 19:02:44.003857 6385 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1121 19:02:44.003869 6385 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1121 19:02:44.003878 6385 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1121 19:02:44.003925 6385 factory.go:656] Stopping watch factory\\\\nI1121 19:02:44.003947 6385 ovnkube.go:599] Stopped ovnkube\\\\nI1121 19:02:44.003948 6385 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1121 19:02:44.004011 6385 handler.go:208] Removed *v1.Node event handler 2\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:43Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-zzdxm_openshift-ovn-kubernetes(cd6417be-62d7-4b6a-9711-a89211dca42e)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zzdxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:04Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.812801 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e70a068b-c06b-4ffe-8496-6f55c321d614\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b6a8b6f55f76ffe5d5f9997137285e639ae17fda481325198a8561d79393480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",
\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e81ee034439f66ef1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-tbszf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:04Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.820089 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.820133 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.820144 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.820162 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.820174 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:04Z","lastTransitionTime":"2025-11-21T19:03:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.829929 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-n6w8v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"57baab98-95f2-4dff-94ff-a296ffe8a418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fec1e2980b38c9ac8c023bc67c0c4a17c7a7e47d88a78ffa6e72562410d0131e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clkbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-n6w8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:04Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.847707 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f83b02e5-a459-4898-8c7b-3366362593bf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa3eaa10a4f5d97d32beb3ae165c5482920354befe8671ae1012310f0cc51216\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0492e473455f1ff5bbac682a6cb03808c7acabceac976f63c5921134406f901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b3017c15a14fa52dd2d07aedbee017fee93c6d95d96992386779de9bdbbd7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://09e96a898422e0d1c9e6adcd100761bf262b000ec85367141f258f5a76fd606e\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://09e96a898422e0d1c9e6adcd100761bf262b000ec85367141f258f5a76fd606e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:04Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.867181 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:04Z is after 
2025-08-24T17:21:41Z" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.884624 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:04Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.922935 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.922999 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.923018 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.923042 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.923060 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:04Z","lastTransitionTime":"2025-11-21T19:03:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:04 crc kubenswrapper[4701]: I1121 19:03:04.950707 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:03:04 crc kubenswrapper[4701]: E1121 19:03:04.950869 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.025397 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.025840 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.026020 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.026160 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.026388 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:05Z","lastTransitionTime":"2025-11-21T19:03:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.130287 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.130358 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.130377 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.130403 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.130421 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:05Z","lastTransitionTime":"2025-11-21T19:03:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.233742 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.233807 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.233825 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.233851 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.233869 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:05Z","lastTransitionTime":"2025-11-21T19:03:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.336450 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.336524 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.336533 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.336548 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.336559 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:05Z","lastTransitionTime":"2025-11-21T19:03:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.439911 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.439971 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.439991 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.440016 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.440033 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:05Z","lastTransitionTime":"2025-11-21T19:03:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.542526 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.542588 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.542606 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.542630 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.542648 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:05Z","lastTransitionTime":"2025-11-21T19:03:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.645983 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.646059 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.646076 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.646101 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.646118 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:05Z","lastTransitionTime":"2025-11-21T19:03:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.749287 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.749348 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.749366 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.749392 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.749409 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:05Z","lastTransitionTime":"2025-11-21T19:03:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.852634 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.852713 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.852732 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.852757 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.852774 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:05Z","lastTransitionTime":"2025-11-21T19:03:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.951552 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.951744 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:03:05 crc kubenswrapper[4701]: E1121 19:03:05.951914 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 19:03:05 crc kubenswrapper[4701]: E1121 19:03:05.951748 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q5n7s" podUID="73831ccf-a071-4135-b8bf-ee1b9b3c2cd1" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.951552 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:03:05 crc kubenswrapper[4701]: E1121 19:03:05.952093 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.955482 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.955658 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.955699 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.955730 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:05 crc kubenswrapper[4701]: I1121 19:03:05.955750 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:05Z","lastTransitionTime":"2025-11-21T19:03:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.059117 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.059178 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.059195 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.059253 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.059271 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:06Z","lastTransitionTime":"2025-11-21T19:03:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.162354 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.162443 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.162495 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.162518 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.162535 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:06Z","lastTransitionTime":"2025-11-21T19:03:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.265736 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.265927 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.266019 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.266054 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.266106 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:06Z","lastTransitionTime":"2025-11-21T19:03:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.368866 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.368917 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.368934 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.368958 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.368976 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:06Z","lastTransitionTime":"2025-11-21T19:03:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.471461 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.471549 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.471566 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.471592 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.471640 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:06Z","lastTransitionTime":"2025-11-21T19:03:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.573997 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.574043 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.574060 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.574081 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.574099 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:06Z","lastTransitionTime":"2025-11-21T19:03:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.585739 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.585779 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.585892 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.585917 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.585952 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:06Z","lastTransitionTime":"2025-11-21T19:03:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:06 crc kubenswrapper[4701]: E1121 19:03:06.606666 4701 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3bda9678-f6a5-4de4-acaa-3527a0be80fa\\\",\\\"systemUUID\\\":\\\"5ab738c4-0d34-41bd-a531-77773953d838\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:06Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.612112 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.612175 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.612194 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.612271 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.612294 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:06Z","lastTransitionTime":"2025-11-21T19:03:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:06 crc kubenswrapper[4701]: E1121 19:03:06.630773 4701 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3bda9678-f6a5-4de4-acaa-3527a0be80fa\\\",\\\"systemUUID\\\":\\\"5ab738c4-0d34-41bd-a531-77773953d838\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:06Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.635740 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.635817 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.635834 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.635878 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.635896 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:06Z","lastTransitionTime":"2025-11-21T19:03:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:06 crc kubenswrapper[4701]: E1121 19:03:06.655634 4701 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3bda9678-f6a5-4de4-acaa-3527a0be80fa\\\",\\\"systemUUID\\\":\\\"5ab738c4-0d34-41bd-a531-77773953d838\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:06Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.662179 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.662305 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.662362 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.662386 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.662404 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:06Z","lastTransitionTime":"2025-11-21T19:03:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:06 crc kubenswrapper[4701]: E1121 19:03:06.681618 4701 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3bda9678-f6a5-4de4-acaa-3527a0be80fa\\\",\\\"systemUUID\\\":\\\"5ab738c4-0d34-41bd-a531-77773953d838\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:06Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.686962 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.687051 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.687073 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.687100 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.687121 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:06Z","lastTransitionTime":"2025-11-21T19:03:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:06 crc kubenswrapper[4701]: E1121 19:03:06.707486 4701 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T19:03:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3bda9678-f6a5-4de4-acaa-3527a0be80fa\\\",\\\"systemUUID\\\":\\\"5ab738c4-0d34-41bd-a531-77773953d838\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:06Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:06 crc kubenswrapper[4701]: E1121 19:03:06.707715 4701 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.709629 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.709710 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.709728 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.709776 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.709792 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:06Z","lastTransitionTime":"2025-11-21T19:03:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.813160 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.813262 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.813274 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.813320 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.813339 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:06Z","lastTransitionTime":"2025-11-21T19:03:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.918252 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.918509 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.918573 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.918605 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.918672 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:06Z","lastTransitionTime":"2025-11-21T19:03:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:06 crc kubenswrapper[4701]: I1121 19:03:06.950637 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:03:06 crc kubenswrapper[4701]: E1121 19:03:06.950844 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.023755 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.023851 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.023903 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.023929 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.023949 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:07Z","lastTransitionTime":"2025-11-21T19:03:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.127610 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.127666 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.127686 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.127711 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.127730 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:07Z","lastTransitionTime":"2025-11-21T19:03:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.233553 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.233630 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.233653 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.233687 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.233711 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:07Z","lastTransitionTime":"2025-11-21T19:03:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.337162 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.337277 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.337301 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.337325 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.337342 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:07Z","lastTransitionTime":"2025-11-21T19:03:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.440542 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.440967 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.440995 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.441020 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.441040 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:07Z","lastTransitionTime":"2025-11-21T19:03:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.543500 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.543566 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.543584 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.543610 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.543632 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:07Z","lastTransitionTime":"2025-11-21T19:03:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.646253 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.646317 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.646335 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.646359 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.646376 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:07Z","lastTransitionTime":"2025-11-21T19:03:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.749236 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.749305 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.749323 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.749349 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.749371 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:07Z","lastTransitionTime":"2025-11-21T19:03:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.852787 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.852845 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.852861 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.852884 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.852901 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:07Z","lastTransitionTime":"2025-11-21T19:03:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.950508 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.950571 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.950510 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:03:07 crc kubenswrapper[4701]: E1121 19:03:07.950704 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q5n7s" podUID="73831ccf-a071-4135-b8bf-ee1b9b3c2cd1" Nov 21 19:03:07 crc kubenswrapper[4701]: E1121 19:03:07.950808 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 19:03:07 crc kubenswrapper[4701]: E1121 19:03:07.951013 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.957232 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.957281 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.957299 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.957321 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:07 crc kubenswrapper[4701]: I1121 19:03:07.957337 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:07Z","lastTransitionTime":"2025-11-21T19:03:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.060353 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.060435 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.060462 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.060493 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.060516 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:08Z","lastTransitionTime":"2025-11-21T19:03:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.162820 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.162888 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.162907 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.162936 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.162953 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:08Z","lastTransitionTime":"2025-11-21T19:03:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.266190 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.266292 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.266308 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.266332 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.266354 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:08Z","lastTransitionTime":"2025-11-21T19:03:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.369729 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.369781 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.369792 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.369808 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.369820 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:08Z","lastTransitionTime":"2025-11-21T19:03:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.472550 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.472640 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.472699 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.472726 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.472748 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:08Z","lastTransitionTime":"2025-11-21T19:03:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.576075 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.576140 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.576164 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.576191 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.576248 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:08Z","lastTransitionTime":"2025-11-21T19:03:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.679169 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.679308 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.679338 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.679365 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.679383 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:08Z","lastTransitionTime":"2025-11-21T19:03:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.783899 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.783948 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.783964 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.783988 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.784006 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:08Z","lastTransitionTime":"2025-11-21T19:03:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.886584 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.886640 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.886657 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.886681 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.886698 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:08Z","lastTransitionTime":"2025-11-21T19:03:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.950879 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:03:08 crc kubenswrapper[4701]: E1121 19:03:08.951088 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.989757 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.989831 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.989850 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.989874 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:08 crc kubenswrapper[4701]: I1121 19:03:08.989893 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:08Z","lastTransitionTime":"2025-11-21T19:03:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.092788 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.092861 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.092879 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.092903 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.092922 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:09Z","lastTransitionTime":"2025-11-21T19:03:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.195803 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.195841 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.195849 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.195862 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.195873 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:09Z","lastTransitionTime":"2025-11-21T19:03:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.299067 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.299141 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.299158 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.299181 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.299228 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:09Z","lastTransitionTime":"2025-11-21T19:03:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.402409 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.402461 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.402473 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.402488 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.402499 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:09Z","lastTransitionTime":"2025-11-21T19:03:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.505089 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.505149 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.505165 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.505188 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.505231 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:09Z","lastTransitionTime":"2025-11-21T19:03:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.608076 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.608144 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.608161 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.608227 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.608246 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:09Z","lastTransitionTime":"2025-11-21T19:03:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.711992 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.712048 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.712067 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.712101 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.712118 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:09Z","lastTransitionTime":"2025-11-21T19:03:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.814733 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.814777 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.814790 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.814806 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.814818 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:09Z","lastTransitionTime":"2025-11-21T19:03:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.917789 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.917854 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.917871 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.917895 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.917912 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:09Z","lastTransitionTime":"2025-11-21T19:03:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.950717 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.950769 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.950716 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:03:09 crc kubenswrapper[4701]: E1121 19:03:09.950884 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-q5n7s" podUID="73831ccf-a071-4135-b8bf-ee1b9b3c2cd1" Nov 21 19:03:09 crc kubenswrapper[4701]: E1121 19:03:09.951008 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 19:03:09 crc kubenswrapper[4701]: E1121 19:03:09.951128 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.976570 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47f8719285335b7c24c904cb48584429a7c5726e5991aa8788a08be5569eb16e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b53c12977fcf9c264091b4989cc401e21f6952d9e0dd3cba49523d00586502e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\
\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:09Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:09 crc kubenswrapper[4701]: I1121 19:03:09.994735 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:09Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.009148 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-q5n7s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73831ccf-a071-4135-b8bf-ee1b9b3c2cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n54wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n54wn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:29Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-q5n7s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:10Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.020961 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.021035 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.021057 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.021082 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.021103 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:10Z","lastTransitionTime":"2025-11-21T19:03:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.025598 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c471e82-6f14-4fda-b2c8-cf10b8305aa3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad01dd99a2cd4d1d293cd0c4c449212bda139cfd0285d3d3ea7d6cb8a8d2b612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1e0d1ae840d3070be9c70177816ce2d7d39bc3879872bb2c26528524bdf617\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f94e78f07fef839aefd6dac7926308544f2a6c2bf7c79aef0544316d391e3c81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dfdc1fbfc3ab05b19e184a8ecbee1aaace51124a05ef0c6a72644c56ff901b1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b3184432d7690e76fa1e82c4dd4495aba3243724cffc8c5e06aa2698fb4073b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T19:02:03Z\\\",\\\"message\\\":\\\"W1121 19:01:53.215001 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 19:01:53.215454 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763751713 cert, and key in /tmp/serving-cert-345992687/serving-signer.crt, /tmp/serving-cert-345992687/serving-signer.key\\\\nI1121 19:01:53.540564 1 observer_polling.go:159] Starting file observer\\\\nW1121 19:01:53.543785 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 19:01:53.544180 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 19:01:53.545946 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-345992687/tls.crt::/tmp/serving-cert-345992687/tls.key\\\\\\\"\\\\nF1121 19:02:03.820868 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39262483026044b62a5a007167a6ae36590c4349f1ac7f7e8fabdd03db5dfc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b0cb8c54d4f69a17e9c40ac768aebccf73abce4b49ea4c0bef2cd0e8687f1819\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:10Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.047491 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"45e6862c-1f97-44f6-bae7-1f3bcb8a6671\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34c95b36149885bb7733f45329ad8a7014dee06a20e24d8d97a14d66f99bd617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf57b18763e6a1fe98b0f670d5535d21ceffd6effc17331a56dd08d00b43ad1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ff2d56a6e954338aa40e9ccacf6ea72f2dd1e66810cca1441497352ae855378\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2e9513d8c6bc6daee2c300ac66316efe53078196233b31303ccd4719fed6db8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:10Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.066065 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2qmlx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1a640e57-40c2-4e96-829d-c2ace468c63c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://296a1518b4535368c4d631d1a420e04ef8ce67c3595ad77c9f53089da1c82a49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gkqmt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f295d8d57e61
a4a96a2973f461a7bafa92b43432b65abe88e0edddff5be8db6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gkqmt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2qmlx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:10Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.083324 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f83b02e5-a459-4898-8c7b-3366362593bf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:01:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa3eaa10a4f5d97d32beb3ae165c5482920354befe8671ae1012310f0cc51216\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0492e473455f1ff5bbac682a6cb03808c7acabceac976f63c5921134406f901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b3017c15a14fa52dd2d07aedbee017fee93c6d95d96992386779de9bdbbd7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:01:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://09e96a898422e0d1c9e6adcd100761bf262b000ec85367141f258f5a76fd606e\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://09e96a898422e0d1c9e6adcd100761bf262b000ec85367141f258f5a76fd606e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:01:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:01:51Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:01:50Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:10Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.108530 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af375e82affc29033aa26eb9fedd240f5e572e61d6760f265baf7794d05e6a30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:10Z is after 
2025-08-24T17:21:41Z" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.124107 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.124176 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.124242 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.124270 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.124290 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:10Z","lastTransitionTime":"2025-11-21T19:03:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.136713 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:10Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.156580 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd6417be-62d7-4b6a-9711-a89211dca42e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad576a4992adab10688e96fc25b505ad62fcb76a
61f49f2c980efbd229127d63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T19:02:44Z\\\",\\\"message\\\":\\\"oller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1121 19:02:44.003721 6385 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1121 19:02:44.003749 6385 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1121 19:02:44.003769 6385 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1121 19:02:44.003793 6385 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1121 19:02:44.003797 6385 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1121 19:02:44.003812 6385 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1121 19:02:44.003824 6385 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1121 19:02:44.003829 6385 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1121 19:02:44.003857 6385 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1121 19:02:44.003869 6385 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1121 19:02:44.003878 6385 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1121 19:02:44.003925 6385 factory.go:656] Stopping watch factory\\\\nI1121 19:02:44.003947 6385 ovnkube.go:599] Stopped ovnkube\\\\nI1121 19:02:44.003948 6385 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1121 19:02:44.004011 6385 handler.go:208] Removed *v1.Node event handler 2\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:43Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-zzdxm_openshift-ovn-kubernetes(cd6417be-62d7-4b6a-9711-a89211dca42e)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ccmf4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zzdxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:10Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.172290 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e70a068b-c06b-4ffe-8496-6f55c321d614\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b6a8b6f55f76ffe5d5f9997137285e639ae17fda481325198a8561d79393480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",
\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e81ee034439f66ef1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mlhlf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-tbszf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:10Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.190637 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-n6w8v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"57baab98-95f2-4dff-94ff-a296ffe8a418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fec1e2980b38c9ac8c023bc67c0c4a17c7a7e47d88a78ffa6e72562410d0131e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clkbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-n6w8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:10Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.213063 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:10Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.227387 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.227512 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.227531 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.227555 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.227572 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:10Z","lastTransitionTime":"2025-11-21T19:03:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.235643 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1dfcb4581fb2badd2b6efb29b6b52532f56e17195905c268c7b25710ffe85437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:10Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.251443 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hb64h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c769c5d1-60d9-43e1-b130-4373c7eae670\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07ec62beec4f7c4a8cc1504df02a84665027e8c508d74022202e41f529ef9d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rz7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hb64h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:10Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.276518 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ea0e20e-ab13-4b90-b58a-5b4d377c5ead\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef2e819e1fcd8a87514d3018af92b78391cf38e5bbd149125674b3a09c9ec791\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:02:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afc821398a7d652bdd509e62774723d008a27a9c182193d00ce57cbffaf53d92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf687ba9996eafab02d2e2a87bbecab846aa2038d6233fc418b9dd8d95536c53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://51b1dea8d5207193330ded289fc6fbc3c3109e4b719d82a493571cd55f63306d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://357be602c46a81786ddf4101b9943536727e0ea9bfc56e8959dd89a267d2e59d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5979bc0df2f21ea52f977781134b4437e610a3df8769e871c771c2a23a21684f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a720b4e7e78581fe4fa1c4824c79fcdfa4064c84d8bba78ba0c1fe2861ef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T19:02:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kj4cl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xxkwp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:10Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.297764 4701 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kf9jq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2eababf7-b5d3-4479-9ad5-f1060898f324\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:02:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:03:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T19:03:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d836ae8a24de298a553266b2404ddcc460bc6aa64ccacb4f11b25fe6bf984464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T19:03:02Z\\\",\\\"message\\\":\\\"2025-11-21T19:02:17+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_b61266e2-9125-4c3b-b893-41381319a94d\\\\n2025-11-21T19:02:17+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_b61266e2-9125-4c3b-b893-41381319a94d to /host/opt/cni/bin/\\\\n2025-11-21T19:02:17Z [verbose] multus-daemon started\\\\n2025-11-21T19:02:17Z [verbose] Readiness Indicator file check\\\\n2025-11-21T19:03:02Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T19:02:16Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T19:03:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5hzjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T19:02:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kf9jq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T19:03:10Z is after 2025-08-24T17:21:41Z" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.330599 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.330667 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.330684 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.330712 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.330732 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:10Z","lastTransitionTime":"2025-11-21T19:03:10Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.434165 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.434289 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.434311 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.434359 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.434383 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:10Z","lastTransitionTime":"2025-11-21T19:03:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.541568 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.541630 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.541643 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.541663 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.541675 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:10Z","lastTransitionTime":"2025-11-21T19:03:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.645064 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.645115 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.645123 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.645139 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.645151 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:10Z","lastTransitionTime":"2025-11-21T19:03:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.747750 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.747865 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.747924 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.747956 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.748016 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:10Z","lastTransitionTime":"2025-11-21T19:03:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.851191 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.851251 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.851264 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.851282 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.851294 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:10Z","lastTransitionTime":"2025-11-21T19:03:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.950908 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:03:10 crc kubenswrapper[4701]: E1121 19:03:10.951089 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.954141 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.954271 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.954296 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.954321 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:10 crc kubenswrapper[4701]: I1121 19:03:10.954340 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:10Z","lastTransitionTime":"2025-11-21T19:03:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.057157 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.057268 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.057288 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.057317 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.057336 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:11Z","lastTransitionTime":"2025-11-21T19:03:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.161045 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.161122 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.161146 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.161177 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.161237 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:11Z","lastTransitionTime":"2025-11-21T19:03:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.264464 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.264539 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.264559 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.264582 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.264600 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:11Z","lastTransitionTime":"2025-11-21T19:03:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.368091 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.368173 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.368190 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.368250 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.368269 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:11Z","lastTransitionTime":"2025-11-21T19:03:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.470833 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.470895 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.470912 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.470937 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.470955 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:11Z","lastTransitionTime":"2025-11-21T19:03:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.573720 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.573785 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.573801 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.573823 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.573840 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:11Z","lastTransitionTime":"2025-11-21T19:03:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.677683 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.677760 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.677783 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.677811 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.677831 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:11Z","lastTransitionTime":"2025-11-21T19:03:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.781090 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.781160 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.781181 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.781241 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.781265 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:11Z","lastTransitionTime":"2025-11-21T19:03:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.883874 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.883923 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.883933 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.883948 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.883958 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:11Z","lastTransitionTime":"2025-11-21T19:03:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.950293 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.950375 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.950293 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:03:11 crc kubenswrapper[4701]: E1121 19:03:11.950462 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 19:03:11 crc kubenswrapper[4701]: E1121 19:03:11.950594 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 19:03:11 crc kubenswrapper[4701]: E1121 19:03:11.950889 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q5n7s" podUID="73831ccf-a071-4135-b8bf-ee1b9b3c2cd1" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.986979 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.987044 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.987064 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.987087 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:11 crc kubenswrapper[4701]: I1121 19:03:11.987103 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:11Z","lastTransitionTime":"2025-11-21T19:03:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.090515 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.090553 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.090560 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.090573 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.090583 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:12Z","lastTransitionTime":"2025-11-21T19:03:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.193273 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.193319 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.193335 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.193356 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.193377 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:12Z","lastTransitionTime":"2025-11-21T19:03:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.295591 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.295636 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.295650 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.295667 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.295679 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:12Z","lastTransitionTime":"2025-11-21T19:03:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.398588 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.398633 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.398646 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.398662 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.398673 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:12Z","lastTransitionTime":"2025-11-21T19:03:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.501496 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.501530 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.501541 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.501556 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.501565 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:12Z","lastTransitionTime":"2025-11-21T19:03:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.604129 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.604184 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.604216 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.604235 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.604247 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:12Z","lastTransitionTime":"2025-11-21T19:03:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.707336 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.707393 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.707410 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.707615 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.707632 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:12Z","lastTransitionTime":"2025-11-21T19:03:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.810655 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.810721 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.810739 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.810764 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.810781 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:12Z","lastTransitionTime":"2025-11-21T19:03:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.914084 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.914171 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.914181 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.914210 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.914220 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:12Z","lastTransitionTime":"2025-11-21T19:03:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.950816 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:03:12 crc kubenswrapper[4701]: E1121 19:03:12.951242 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 19:03:12 crc kubenswrapper[4701]: I1121 19:03:12.962463 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.016528 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.016574 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.016584 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.016597 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.016607 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:13Z","lastTransitionTime":"2025-11-21T19:03:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.119504 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.119578 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.119601 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.119632 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.119656 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:13Z","lastTransitionTime":"2025-11-21T19:03:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.223414 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.223477 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.223496 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.223522 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.223540 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:13Z","lastTransitionTime":"2025-11-21T19:03:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.326371 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.326435 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.326451 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.326474 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.326493 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:13Z","lastTransitionTime":"2025-11-21T19:03:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.429951 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.430032 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.430057 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.430089 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.430113 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:13Z","lastTransitionTime":"2025-11-21T19:03:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.533454 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.533520 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.533538 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.533563 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.533581 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:13Z","lastTransitionTime":"2025-11-21T19:03:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.637667 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.637728 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.637752 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.637781 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.637805 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:13Z","lastTransitionTime":"2025-11-21T19:03:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.734599 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.734746 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:03:13 crc kubenswrapper[4701]: E1121 19:03:13.735062 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-21 19:04:17.735014306 +0000 UTC m=+148.520154373 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.735126 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.735196 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.735282 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:03:13 crc kubenswrapper[4701]: E1121 19:03:13.735229 4701 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 19:03:13 crc kubenswrapper[4701]: E1121 19:03:13.735576 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 19:04:17.735547628 +0000 UTC m=+148.520687695 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 19:03:13 crc kubenswrapper[4701]: E1121 19:03:13.735357 4701 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 19:03:13 crc kubenswrapper[4701]: E1121 19:03:13.735661 4701 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 19:03:13 crc kubenswrapper[4701]: E1121 19:03:13.735683 4701 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 19:03:13 crc kubenswrapper[4701]: E1121 19:03:13.735743 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-21 19:04:17.735727311 +0000 UTC m=+148.520867378 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 19:03:13 crc kubenswrapper[4701]: E1121 19:03:13.735440 4701 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 19:03:13 crc kubenswrapper[4701]: E1121 19:03:13.735797 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 19:04:17.735785303 +0000 UTC m=+148.520925370 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 19:03:13 crc kubenswrapper[4701]: E1121 19:03:13.735455 4701 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 19:03:13 crc kubenswrapper[4701]: E1121 19:03:13.735828 4701 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 19:03:13 crc kubenswrapper[4701]: E1121 19:03:13.735843 4701 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 19:03:13 crc kubenswrapper[4701]: E1121 19:03:13.735882 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-21 19:04:17.735868905 +0000 UTC m=+148.521008972 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.740740 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.740809 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.740832 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.740861 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.740886 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:13Z","lastTransitionTime":"2025-11-21T19:03:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.843955 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.843989 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.843996 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.844008 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.844016 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:13Z","lastTransitionTime":"2025-11-21T19:03:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.947493 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.947590 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.947650 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.947784 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.947845 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:13Z","lastTransitionTime":"2025-11-21T19:03:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.950467 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.950567 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.951165 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:03:13 crc kubenswrapper[4701]: E1121 19:03:13.951446 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-q5n7s" podUID="73831ccf-a071-4135-b8bf-ee1b9b3c2cd1" Nov 21 19:03:13 crc kubenswrapper[4701]: E1121 19:03:13.951787 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 19:03:13 crc kubenswrapper[4701]: E1121 19:03:13.952023 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 19:03:13 crc kubenswrapper[4701]: I1121 19:03:13.970087 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.050529 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.050595 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.050614 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.050642 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.050662 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:14Z","lastTransitionTime":"2025-11-21T19:03:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.154406 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.154464 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.154485 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.154510 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.154528 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:14Z","lastTransitionTime":"2025-11-21T19:03:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.256760 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.256833 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.256854 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.256883 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.256904 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:14Z","lastTransitionTime":"2025-11-21T19:03:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.359735 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.359787 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.359799 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.359815 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.359828 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:14Z","lastTransitionTime":"2025-11-21T19:03:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.462507 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.462559 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.462575 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.462597 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.462613 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:14Z","lastTransitionTime":"2025-11-21T19:03:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.565733 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.565809 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.565826 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.565854 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.565872 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:14Z","lastTransitionTime":"2025-11-21T19:03:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.669405 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.669515 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.669531 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.669555 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.669573 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:14Z","lastTransitionTime":"2025-11-21T19:03:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.773255 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.773311 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.773322 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.773339 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.773352 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:14Z","lastTransitionTime":"2025-11-21T19:03:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.875822 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.876440 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.876476 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.876502 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.876520 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:14Z","lastTransitionTime":"2025-11-21T19:03:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.950391 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:03:14 crc kubenswrapper[4701]: E1121 19:03:14.951086 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.951470 4701 scope.go:117] "RemoveContainer" containerID="ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.980447 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.980514 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.980537 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.980563 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:14 crc kubenswrapper[4701]: I1121 19:03:14.980581 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:14Z","lastTransitionTime":"2025-11-21T19:03:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.084043 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.084319 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.084514 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.084694 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.084863 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:15Z","lastTransitionTime":"2025-11-21T19:03:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.188307 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.188366 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.188383 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.188410 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.188425 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:15Z","lastTransitionTime":"2025-11-21T19:03:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.292408 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.292488 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.292503 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.292522 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.292536 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:15Z","lastTransitionTime":"2025-11-21T19:03:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.398060 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.398115 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.398133 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.398157 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.398174 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:15Z","lastTransitionTime":"2025-11-21T19:03:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.501813 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.501873 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.501892 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.501915 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.501931 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:15Z","lastTransitionTime":"2025-11-21T19:03:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.593009 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zzdxm_cd6417be-62d7-4b6a-9711-a89211dca42e/ovnkube-controller/2.log" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.597499 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" event={"ID":"cd6417be-62d7-4b6a-9711-a89211dca42e","Type":"ContainerStarted","Data":"b491b2a33b86d66bb70fc5ff2ad44c4876016d6c3ec1a9e1343f0d023a703867"} Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.598423 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.605608 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.605653 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.605672 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.605694 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.605711 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:15Z","lastTransitionTime":"2025-11-21T19:03:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.646842 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-hb64h" podStartSLOduration=60.646805916 podStartE2EDuration="1m0.646805916s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:15.645704221 +0000 UTC m=+86.430844298" watchObservedRunningTime="2025-11-21 19:03:15.646805916 +0000 UTC m=+86.431945983" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.702744 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-xxkwp" podStartSLOduration=60.702718326 podStartE2EDuration="1m0.702718326s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:15.682648807 +0000 UTC m=+86.467788924" watchObservedRunningTime="2025-11-21 19:03:15.702718326 +0000 UTC m=+86.487858393" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.708736 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.708785 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.708800 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.708820 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.708834 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:15Z","lastTransitionTime":"2025-11-21T19:03:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.723052 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-kf9jq" podStartSLOduration=60.72303076 podStartE2EDuration="1m0.72303076s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:15.702687506 +0000 UTC m=+86.487827573" watchObservedRunningTime="2025-11-21 19:03:15.72303076 +0000 UTC m=+86.508170797" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.790851 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=66.79083188 podStartE2EDuration="1m6.79083188s" podCreationTimestamp="2025-11-21 19:02:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:15.790425131 +0000 UTC m=+86.575565168" watchObservedRunningTime="2025-11-21 19:03:15.79083188 +0000 UTC m=+86.575971917" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.811817 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.811861 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.811872 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.811886 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.811895 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:15Z","lastTransitionTime":"2025-11-21T19:03:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.814190 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=60.814172359 podStartE2EDuration="1m0.814172359s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:15.811086142 +0000 UTC m=+86.596226169" watchObservedRunningTime="2025-11-21 19:03:15.814172359 +0000 UTC m=+86.599312396" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.834742 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2qmlx" podStartSLOduration=60.834723858 podStartE2EDuration="1m0.834723858s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:15.833043171 +0000 UTC m=+86.618183208" watchObservedRunningTime="2025-11-21 19:03:15.834723858 +0000 UTC m=+86.619863895" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.861367 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=3.861352729 podStartE2EDuration="3.861352729s" podCreationTimestamp="2025-11-21 19:03:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:15.850096503 +0000 UTC m=+86.635236530" watchObservedRunningTime="2025-11-21 19:03:15.861352729 +0000 UTC m=+86.646492766" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.875934 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=37.875919797 podStartE2EDuration="37.875919797s" podCreationTimestamp="2025-11-21 19:02:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:15.862812041 +0000 UTC m=+86.647952068" watchObservedRunningTime="2025-11-21 19:03:15.875919797 +0000 UTC m=+86.661059834" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.913888 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.914150 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.914317 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.914487 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.914604 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:15Z","lastTransitionTime":"2025-11-21T19:03:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.924094 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" podStartSLOduration=60.924072599 podStartE2EDuration="1m0.924072599s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:15.912848674 +0000 UTC m=+86.697988701" watchObservedRunningTime="2025-11-21 19:03:15.924072599 +0000 UTC m=+86.709212626" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.924897 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podStartSLOduration=60.924891177 podStartE2EDuration="1m0.924891177s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:15.92457437 +0000 UTC m=+86.709714397" watchObservedRunningTime="2025-11-21 19:03:15.924891177 +0000 UTC m=+86.710031204" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.935386 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-n6w8v" podStartSLOduration=60.935364596 podStartE2EDuration="1m0.935364596s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:15.934844303 +0000 UTC m=+86.719984330" watchObservedRunningTime="2025-11-21 19:03:15.935364596 +0000 UTC m=+86.720504623" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.950125 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.950179 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.950248 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:03:15 crc kubenswrapper[4701]: E1121 19:03:15.950690 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q5n7s" podUID="73831ccf-a071-4135-b8bf-ee1b9b3c2cd1" Nov 21 19:03:15 crc kubenswrapper[4701]: E1121 19:03:15.950799 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 19:03:15 crc kubenswrapper[4701]: E1121 19:03:15.950915 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 19:03:15 crc kubenswrapper[4701]: I1121 19:03:15.967906 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=2.967890525 podStartE2EDuration="2.967890525s" podCreationTimestamp="2025-11-21 19:03:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:15.967717571 +0000 UTC m=+86.752857588" watchObservedRunningTime="2025-11-21 19:03:15.967890525 +0000 UTC m=+86.753030552" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.017166 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.017221 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.017235 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.017251 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.017263 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:16Z","lastTransitionTime":"2025-11-21T19:03:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.119888 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.119928 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.119942 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.119960 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.119972 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:16Z","lastTransitionTime":"2025-11-21T19:03:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.226069 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.226775 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.227297 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.227516 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.227692 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:16Z","lastTransitionTime":"2025-11-21T19:03:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.238963 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-q5n7s"] Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.330631 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.330920 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.331076 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.331210 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.331312 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:16Z","lastTransitionTime":"2025-11-21T19:03:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.435468 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.435557 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.435581 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.435622 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.435645 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:16Z","lastTransitionTime":"2025-11-21T19:03:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.537924 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.537969 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.537981 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.537997 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.538012 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:16Z","lastTransitionTime":"2025-11-21T19:03:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.600691 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:03:16 crc kubenswrapper[4701]: E1121 19:03:16.600833 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-q5n7s" podUID="73831ccf-a071-4135-b8bf-ee1b9b3c2cd1" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.640586 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.640648 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.640670 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.640695 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.640714 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:16Z","lastTransitionTime":"2025-11-21T19:03:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.743791 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.743857 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.743874 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.743896 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.743914 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:16Z","lastTransitionTime":"2025-11-21T19:03:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.847024 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.847473 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.847493 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.847522 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.847538 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:16Z","lastTransitionTime":"2025-11-21T19:03:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.950564 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:03:16 crc kubenswrapper[4701]: E1121 19:03:16.950773 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.950796 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.950844 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.950869 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.950896 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:16 crc kubenswrapper[4701]: I1121 19:03:16.950918 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:16Z","lastTransitionTime":"2025-11-21T19:03:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 19:03:17 crc kubenswrapper[4701]: I1121 19:03:17.053694 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:17 crc kubenswrapper[4701]: I1121 19:03:17.053750 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:17 crc kubenswrapper[4701]: I1121 19:03:17.053761 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:17 crc kubenswrapper[4701]: I1121 19:03:17.053777 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:17 crc kubenswrapper[4701]: I1121 19:03:17.053788 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:17Z","lastTransitionTime":"2025-11-21T19:03:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:17 crc kubenswrapper[4701]: I1121 19:03:17.061800 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 19:03:17 crc kubenswrapper[4701]: I1121 19:03:17.061868 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 19:03:17 crc kubenswrapper[4701]: I1121 19:03:17.061895 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 19:03:17 crc kubenswrapper[4701]: I1121 19:03:17.061923 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 19:03:17 crc kubenswrapper[4701]: I1121 19:03:17.061945 4701 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T19:03:17Z","lastTransitionTime":"2025-11-21T19:03:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 19:03:17 crc kubenswrapper[4701]: I1121 19:03:17.123932 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-ldn5n"] Nov 21 19:03:17 crc kubenswrapper[4701]: I1121 19:03:17.124438 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ldn5n" Nov 21 19:03:17 crc kubenswrapper[4701]: I1121 19:03:17.126571 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 21 19:03:17 crc kubenswrapper[4701]: I1121 19:03:17.127650 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 21 19:03:17 crc kubenswrapper[4701]: I1121 19:03:17.128606 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 21 19:03:17 crc kubenswrapper[4701]: I1121 19:03:17.128856 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 21 19:03:17 crc kubenswrapper[4701]: I1121 19:03:17.169962 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/ea175a7b-c1da-4645-9600-ed0b78bbfe30-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-ldn5n\" (UID: \"ea175a7b-c1da-4645-9600-ed0b78bbfe30\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ldn5n" Nov 21 19:03:17 crc kubenswrapper[4701]: I1121 19:03:17.170304 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/ea175a7b-c1da-4645-9600-ed0b78bbfe30-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-ldn5n\" (UID: \"ea175a7b-c1da-4645-9600-ed0b78bbfe30\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ldn5n" Nov 21 19:03:17 crc kubenswrapper[4701]: I1121 19:03:17.170417 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ea175a7b-c1da-4645-9600-ed0b78bbfe30-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-ldn5n\" (UID: \"ea175a7b-c1da-4645-9600-ed0b78bbfe30\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ldn5n" Nov 21 19:03:17 crc kubenswrapper[4701]: I1121 19:03:17.170562 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ea175a7b-c1da-4645-9600-ed0b78bbfe30-service-ca\") pod \"cluster-version-operator-5c965bbfc6-ldn5n\" (UID: \"ea175a7b-c1da-4645-9600-ed0b78bbfe30\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ldn5n" Nov 21 19:03:17 crc kubenswrapper[4701]: I1121 19:03:17.170676 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ea175a7b-c1da-4645-9600-ed0b78bbfe30-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-ldn5n\" (UID: \"ea175a7b-c1da-4645-9600-ed0b78bbfe30\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ldn5n" Nov 21 19:03:17 crc kubenswrapper[4701]: I1121 19:03:17.271568 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/ea175a7b-c1da-4645-9600-ed0b78bbfe30-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-ldn5n\" (UID: \"ea175a7b-c1da-4645-9600-ed0b78bbfe30\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ldn5n" Nov 21 19:03:17 crc 
kubenswrapper[4701]: I1121 19:03:17.271623 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ea175a7b-c1da-4645-9600-ed0b78bbfe30-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-ldn5n\" (UID: \"ea175a7b-c1da-4645-9600-ed0b78bbfe30\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ldn5n" Nov 21 19:03:17 crc kubenswrapper[4701]: I1121 19:03:17.271674 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ea175a7b-c1da-4645-9600-ed0b78bbfe30-service-ca\") pod \"cluster-version-operator-5c965bbfc6-ldn5n\" (UID: \"ea175a7b-c1da-4645-9600-ed0b78bbfe30\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ldn5n" Nov 21 19:03:17 crc kubenswrapper[4701]: I1121 19:03:17.271699 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/ea175a7b-c1da-4645-9600-ed0b78bbfe30-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-ldn5n\" (UID: \"ea175a7b-c1da-4645-9600-ed0b78bbfe30\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ldn5n" Nov 21 19:03:17 crc kubenswrapper[4701]: I1121 19:03:17.271716 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ea175a7b-c1da-4645-9600-ed0b78bbfe30-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-ldn5n\" (UID: \"ea175a7b-c1da-4645-9600-ed0b78bbfe30\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ldn5n" Nov 21 19:03:17 crc kubenswrapper[4701]: I1121 19:03:17.271897 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/ea175a7b-c1da-4645-9600-ed0b78bbfe30-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-ldn5n\" (UID: \"ea175a7b-c1da-4645-9600-ed0b78bbfe30\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ldn5n" Nov 21 19:03:17 crc kubenswrapper[4701]: I1121 19:03:17.272015 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/ea175a7b-c1da-4645-9600-ed0b78bbfe30-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-ldn5n\" (UID: \"ea175a7b-c1da-4645-9600-ed0b78bbfe30\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ldn5n" Nov 21 19:03:17 crc kubenswrapper[4701]: I1121 19:03:17.273464 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ea175a7b-c1da-4645-9600-ed0b78bbfe30-service-ca\") pod \"cluster-version-operator-5c965bbfc6-ldn5n\" (UID: \"ea175a7b-c1da-4645-9600-ed0b78bbfe30\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ldn5n" Nov 21 19:03:17 crc kubenswrapper[4701]: I1121 19:03:17.280433 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ea175a7b-c1da-4645-9600-ed0b78bbfe30-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-ldn5n\" (UID: \"ea175a7b-c1da-4645-9600-ed0b78bbfe30\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ldn5n" Nov 21 19:03:17 crc kubenswrapper[4701]: I1121 19:03:17.303147 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" 
(UniqueName: \"kubernetes.io/projected/ea175a7b-c1da-4645-9600-ed0b78bbfe30-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-ldn5n\" (UID: \"ea175a7b-c1da-4645-9600-ed0b78bbfe30\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ldn5n" Nov 21 19:03:17 crc kubenswrapper[4701]: I1121 19:03:17.443301 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ldn5n" Nov 21 19:03:17 crc kubenswrapper[4701]: W1121 19:03:17.462985 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podea175a7b_c1da_4645_9600_ed0b78bbfe30.slice/crio-ef06d231208b9f296caea5cd3ce9f0b6126d9b21ebce24e3d67284712ee38bb5 WatchSource:0}: Error finding container ef06d231208b9f296caea5cd3ce9f0b6126d9b21ebce24e3d67284712ee38bb5: Status 404 returned error can't find the container with id ef06d231208b9f296caea5cd3ce9f0b6126d9b21ebce24e3d67284712ee38bb5 Nov 21 19:03:17 crc kubenswrapper[4701]: I1121 19:03:17.605952 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ldn5n" event={"ID":"ea175a7b-c1da-4645-9600-ed0b78bbfe30","Type":"ContainerStarted","Data":"ef06d231208b9f296caea5cd3ce9f0b6126d9b21ebce24e3d67284712ee38bb5"} Nov 21 19:03:17 crc kubenswrapper[4701]: I1121 19:03:17.950033 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:03:17 crc kubenswrapper[4701]: I1121 19:03:17.950196 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:03:17 crc kubenswrapper[4701]: E1121 19:03:17.950290 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 19:03:17 crc kubenswrapper[4701]: E1121 19:03:17.950469 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.503589 4701 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.503814 4701 kubelet_node_status.go:538] "Fast updating node status as it just became ready" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.554241 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-8gkzf"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.555820 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.564680 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.569485 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.569943 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.570410 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.571019 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.573060 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.573749 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.574040 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.574349 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.584514 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-gdj2w"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.584660 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.587393 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pz8rd"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.588889 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3f95c2f8-00de-4a99-a573-3c5ccea86d5f-audit-dir\") pod \"apiserver-76f77b778f-8gkzf\" (UID: \"3f95c2f8-00de-4a99-a573-3c5ccea86d5f\") " pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.589013 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/3f95c2f8-00de-4a99-a573-3c5ccea86d5f-image-import-ca\") pod \"apiserver-76f77b778f-8gkzf\" (UID: \"3f95c2f8-00de-4a99-a573-3c5ccea86d5f\") " pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.589070 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3f95c2f8-00de-4a99-a573-3c5ccea86d5f-trusted-ca-bundle\") pod \"apiserver-76f77b778f-8gkzf\" (UID: \"3f95c2f8-00de-4a99-a573-3c5ccea86d5f\") " pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.589167 4701 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gzvxf\" (UniqueName: \"kubernetes.io/projected/3f95c2f8-00de-4a99-a573-3c5ccea86d5f-kube-api-access-gzvxf\") pod \"apiserver-76f77b778f-8gkzf\" (UID: \"3f95c2f8-00de-4a99-a573-3c5ccea86d5f\") " pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.589292 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3f95c2f8-00de-4a99-a573-3c5ccea86d5f-serving-cert\") pod \"apiserver-76f77b778f-8gkzf\" (UID: \"3f95c2f8-00de-4a99-a573-3c5ccea86d5f\") " pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.589346 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/3f95c2f8-00de-4a99-a573-3c5ccea86d5f-etcd-serving-ca\") pod \"apiserver-76f77b778f-8gkzf\" (UID: \"3f95c2f8-00de-4a99-a573-3c5ccea86d5f\") " pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.589451 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f95c2f8-00de-4a99-a573-3c5ccea86d5f-config\") pod \"apiserver-76f77b778f-8gkzf\" (UID: \"3f95c2f8-00de-4a99-a573-3c5ccea86d5f\") " pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.589483 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/3f95c2f8-00de-4a99-a573-3c5ccea86d5f-audit\") pod \"apiserver-76f77b778f-8gkzf\" (UID: \"3f95c2f8-00de-4a99-a573-3c5ccea86d5f\") " pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.589567 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/3f95c2f8-00de-4a99-a573-3c5ccea86d5f-etcd-client\") pod \"apiserver-76f77b778f-8gkzf\" (UID: \"3f95c2f8-00de-4a99-a573-3c5ccea86d5f\") " pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.589636 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-gdj2w" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.589668 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/3f95c2f8-00de-4a99-a573-3c5ccea86d5f-encryption-config\") pod \"apiserver-76f77b778f-8gkzf\" (UID: \"3f95c2f8-00de-4a99-a573-3c5ccea86d5f\") " pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.589737 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/3f95c2f8-00de-4a99-a573-3c5ccea86d5f-node-pullsecrets\") pod \"apiserver-76f77b778f-8gkzf\" (UID: \"3f95c2f8-00de-4a99-a573-3c5ccea86d5f\") " pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.590160 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-7zd2w"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.600402 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pz8rd" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.602036 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.602304 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-7zd2w" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.603045 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.608531 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-gzwfn"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.609047 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-zsvsv"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.609424 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-wzsrk"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.609708 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.610393 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-gzwfn" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.610632 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zsvsv" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.613519 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.614070 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.614397 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.614568 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.614737 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.614886 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.615152 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.615355 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.615797 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.616083 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.617724 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.617941 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.618107 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.618242 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.618140 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.618559 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.618846 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.624032 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.625046 4701 reflector.go:368] Caches populated 
for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.625311 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.626896 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.627191 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.627340 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.627529 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.627680 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.627345 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.627683 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.628191 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.630431 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.630580 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-hsngh"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.631226 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-hsngh" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.631415 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ldn5n" event={"ID":"ea175a7b-c1da-4645-9600-ed0b78bbfe30","Type":"ContainerStarted","Data":"3ab7ef1acc8d0cc71ec1aed71dfb27f35d79ca7f255c94fec69f1728f2fbbd5b"} Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.639150 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-n6dgf"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.639665 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-n6dgf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.639754 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-zwplq"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.640021 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-zwplq" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.647709 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.647985 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.648121 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.648244 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.648377 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.649115 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l7w9b"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.649151 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.649322 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.649647 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l7w9b" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.650062 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.650181 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.650276 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.650352 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.650488 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.650608 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.665903 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.666614 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.672749 4701 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.672986 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.673698 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.674355 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.674454 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.675405 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.677070 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s9t62"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.677958 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-zfhdw"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.678318 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s9t62" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.679068 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zfhdw" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.680848 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-scrql"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.686222 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.690333 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-scrql" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.691509 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-zt9ht"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.691949 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-twqqt"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.692361 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-twqqt" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.692586 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-zt9ht" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.693377 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ca30ea3d-b3d5-478c-a154-284e721664d7-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-n6dgf\" (UID: \"ca30ea3d-b3d5-478c-a154-284e721664d7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-n6dgf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.693739 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/3f95c2f8-00de-4a99-a573-3c5ccea86d5f-node-pullsecrets\") pod \"apiserver-76f77b778f-8gkzf\" (UID: \"3f95c2f8-00de-4a99-a573-3c5ccea86d5f\") " pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.693792 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/3f95c2f8-00de-4a99-a573-3c5ccea86d5f-encryption-config\") pod \"apiserver-76f77b778f-8gkzf\" (UID: \"3f95c2f8-00de-4a99-a573-3c5ccea86d5f\") " pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.693811 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3f95c2f8-00de-4a99-a573-3c5ccea86d5f-audit-dir\") pod \"apiserver-76f77b778f-8gkzf\" (UID: \"3f95c2f8-00de-4a99-a573-3c5ccea86d5f\") " pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.693833 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rxnqh\" (UniqueName: \"kubernetes.io/projected/7ae80b08-d9c6-48e0-8fee-ce6cbff3b7e3-kube-api-access-rxnqh\") pod \"ingress-operator-5b745b69d9-zfhdw\" (UID: \"7ae80b08-d9c6-48e0-8fee-ce6cbff3b7e3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zfhdw" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.693878 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/3f95c2f8-00de-4a99-a573-3c5ccea86d5f-image-import-ca\") pod \"apiserver-76f77b778f-8gkzf\" (UID: \"3f95c2f8-00de-4a99-a573-3c5ccea86d5f\") " pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.693897 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ca30ea3d-b3d5-478c-a154-284e721664d7-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-n6dgf\" (UID: \"ca30ea3d-b3d5-478c-a154-284e721664d7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-n6dgf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.693914 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7ae80b08-d9c6-48e0-8fee-ce6cbff3b7e3-trusted-ca\") pod \"ingress-operator-5b745b69d9-zfhdw\" (UID: \"7ae80b08-d9c6-48e0-8fee-ce6cbff3b7e3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zfhdw" Nov 21 19:03:18 crc 
kubenswrapper[4701]: I1121 19:03:18.693951 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3f95c2f8-00de-4a99-a573-3c5ccea86d5f-trusted-ca-bundle\") pod \"apiserver-76f77b778f-8gkzf\" (UID: \"3f95c2f8-00de-4a99-a573-3c5ccea86d5f\") " pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.693967 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3f95c2f8-00de-4a99-a573-3c5ccea86d5f-serving-cert\") pod \"apiserver-76f77b778f-8gkzf\" (UID: \"3f95c2f8-00de-4a99-a573-3c5ccea86d5f\") " pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.693989 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/3f95c2f8-00de-4a99-a573-3c5ccea86d5f-etcd-client\") pod \"apiserver-76f77b778f-8gkzf\" (UID: \"3f95c2f8-00de-4a99-a573-3c5ccea86d5f\") " pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.694026 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/7ae80b08-d9c6-48e0-8fee-ce6cbff3b7e3-metrics-tls\") pod \"ingress-operator-5b745b69d9-zfhdw\" (UID: \"7ae80b08-d9c6-48e0-8fee-ce6cbff3b7e3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zfhdw" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.694070 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l54lr\" (UniqueName: \"kubernetes.io/projected/ca30ea3d-b3d5-478c-a154-284e721664d7-kube-api-access-l54lr\") pod \"kube-storage-version-migrator-operator-b67b599dd-n6dgf\" (UID: \"ca30ea3d-b3d5-478c-a154-284e721664d7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-n6dgf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.694136 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gzvxf\" (UniqueName: \"kubernetes.io/projected/3f95c2f8-00de-4a99-a573-3c5ccea86d5f-kube-api-access-gzvxf\") pod \"apiserver-76f77b778f-8gkzf\" (UID: \"3f95c2f8-00de-4a99-a573-3c5ccea86d5f\") " pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.694188 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/3f95c2f8-00de-4a99-a573-3c5ccea86d5f-etcd-serving-ca\") pod \"apiserver-76f77b778f-8gkzf\" (UID: \"3f95c2f8-00de-4a99-a573-3c5ccea86d5f\") " pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.694240 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/7ae80b08-d9c6-48e0-8fee-ce6cbff3b7e3-bound-sa-token\") pod \"ingress-operator-5b745b69d9-zfhdw\" (UID: \"7ae80b08-d9c6-48e0-8fee-ce6cbff3b7e3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zfhdw" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.694261 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/3f95c2f8-00de-4a99-a573-3c5ccea86d5f-config\") pod \"apiserver-76f77b778f-8gkzf\" (UID: \"3f95c2f8-00de-4a99-a573-3c5ccea86d5f\") " pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.694276 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/3f95c2f8-00de-4a99-a573-3c5ccea86d5f-audit\") pod \"apiserver-76f77b778f-8gkzf\" (UID: \"3f95c2f8-00de-4a99-a573-3c5ccea86d5f\") " pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.694407 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/3f95c2f8-00de-4a99-a573-3c5ccea86d5f-node-pullsecrets\") pod \"apiserver-76f77b778f-8gkzf\" (UID: \"3f95c2f8-00de-4a99-a573-3c5ccea86d5f\") " pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.695415 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.695855 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3f95c2f8-00de-4a99-a573-3c5ccea86d5f-audit-dir\") pod \"apiserver-76f77b778f-8gkzf\" (UID: \"3f95c2f8-00de-4a99-a573-3c5ccea86d5f\") " pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.696690 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/3f95c2f8-00de-4a99-a573-3c5ccea86d5f-image-import-ca\") pod \"apiserver-76f77b778f-8gkzf\" (UID: \"3f95c2f8-00de-4a99-a573-3c5ccea86d5f\") " pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.697008 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3f95c2f8-00de-4a99-a573-3c5ccea86d5f-trusted-ca-bundle\") pod \"apiserver-76f77b778f-8gkzf\" (UID: \"3f95c2f8-00de-4a99-a573-3c5ccea86d5f\") " pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.697176 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.697645 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/3f95c2f8-00de-4a99-a573-3c5ccea86d5f-encryption-config\") pod \"apiserver-76f77b778f-8gkzf\" (UID: \"3f95c2f8-00de-4a99-a573-3c5ccea86d5f\") " pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.698071 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/3f95c2f8-00de-4a99-a573-3c5ccea86d5f-etcd-serving-ca\") pod \"apiserver-76f77b778f-8gkzf\" (UID: \"3f95c2f8-00de-4a99-a573-3c5ccea86d5f\") " pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.698326 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.698527 4701 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.698724 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.698745 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/3f95c2f8-00de-4a99-a573-3c5ccea86d5f-audit\") pod \"apiserver-76f77b778f-8gkzf\" (UID: \"3f95c2f8-00de-4a99-a573-3c5ccea86d5f\") " pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.698530 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f95c2f8-00de-4a99-a573-3c5ccea86d5f-config\") pod \"apiserver-76f77b778f-8gkzf\" (UID: \"3f95c2f8-00de-4a99-a573-3c5ccea86d5f\") " pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.701682 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7wcwn"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.704227 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.704406 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.704545 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.704646 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.704739 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.704835 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.704947 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.704999 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/3f95c2f8-00de-4a99-a573-3c5ccea86d5f-etcd-client\") pod \"apiserver-76f77b778f-8gkzf\" (UID: \"3f95c2f8-00de-4a99-a573-3c5ccea86d5f\") " pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.705151 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.705348 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.705579 4701 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.706597 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-v6w6b"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.707633 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7wcwn" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.710079 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395860-72qpb"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.710547 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-zmch7"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.710922 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-zmch7" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.711383 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-v6w6b" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.711740 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395860-72qpb" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.714421 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.714711 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.714764 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.715236 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.715239 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.715563 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.716972 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mqbkq"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.717566 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mqbkq" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.717864 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7fs2f"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.718465 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7fs2f" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.718654 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-jbq9k"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.719087 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-jbq9k" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.720638 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-76rvr"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.721133 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-cwddx"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.721445 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-cwddx" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.721646 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-76rvr" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.722393 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.722677 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.722915 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.723022 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.723141 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.723146 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.723443 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.723538 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.723546 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.723627 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.723712 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.725329 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/3f95c2f8-00de-4a99-a573-3c5ccea86d5f-serving-cert\") pod \"apiserver-76f77b778f-8gkzf\" (UID: \"3f95c2f8-00de-4a99-a573-3c5ccea86d5f\") " pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.725956 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-g7gbn"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.726756 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-g7gbn" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.728791 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.728956 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-75d7z"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.729783 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-hbp5b"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.729980 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-75d7z" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.737116 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-hbp5b" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.750938 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gzvxf\" (UniqueName: \"kubernetes.io/projected/3f95c2f8-00de-4a99-a573-3c5ccea86d5f-kube-api-access-gzvxf\") pod \"apiserver-76f77b778f-8gkzf\" (UID: \"3f95c2f8-00de-4a99-a573-3c5ccea86d5f\") " pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.751362 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.751361 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.751999 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-v5bsv"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.753408 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.753894 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.754285 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-v5bsv" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.755040 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-2b8wr"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.762631 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-twmdp"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.763251 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.763559 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-2b8wr" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.766316 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.768704 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.771251 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-rfbjh"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.773088 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-rfbjh" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.775006 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-z6z69"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.777475 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-z6z69" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.778739 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9rlms"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.779819 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9rlms" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.780686 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.784474 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nzzw5"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.785807 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nzzw5" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.809259 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-hcwlb"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.810230 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4kqhw"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.810577 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-w4fcr"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.811175 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-w4fcr" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.811568 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hcwlb" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.811739 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4kqhw" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.812553 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-gdj2w"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.812618 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/cfa749f0-83ce-4ba6-8a3e-e43257bdb907-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-scrql\" (UID: \"cfa749f0-83ce-4ba6-8a3e-e43257bdb907\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-scrql" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.812644 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b8139c8-66be-4f40-a084-aa26d58554bb-config\") pod \"controller-manager-879f6c89f-gdj2w\" (UID: \"4b8139c8-66be-4f40-a084-aa26d58554bb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gdj2w" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.812662 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-registry-certificates\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.812691 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/7ae80b08-d9c6-48e0-8fee-ce6cbff3b7e3-metrics-tls\") pod \"ingress-operator-5b745b69d9-zfhdw\" (UID: \"7ae80b08-d9c6-48e0-8fee-ce6cbff3b7e3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zfhdw" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.812708 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2fhp8\" (UniqueName: 
\"kubernetes.io/projected/cfa749f0-83ce-4ba6-8a3e-e43257bdb907-kube-api-access-2fhp8\") pod \"multus-admission-controller-857f4d67dd-scrql\" (UID: \"cfa749f0-83ce-4ba6-8a3e-e43257bdb907\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-scrql" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.812728 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l54lr\" (UniqueName: \"kubernetes.io/projected/ca30ea3d-b3d5-478c-a154-284e721664d7-kube-api-access-l54lr\") pod \"kube-storage-version-migrator-operator-b67b599dd-n6dgf\" (UID: \"ca30ea3d-b3d5-478c-a154-284e721664d7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-n6dgf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.812752 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6469e01b-cfc6-4ec9-87de-29c6eeee136f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-l7w9b\" (UID: \"6469e01b-cfc6-4ec9-87de-29c6eeee136f\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l7w9b" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.812770 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/8a478166-ab28-4186-a9c2-f079c8b2f2d2-available-featuregates\") pod \"openshift-config-operator-7777fb866f-zsvsv\" (UID: \"8a478166-ab28-4186-a9c2-f079c8b2f2d2\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zsvsv" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.812787 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t9bfs\" (UniqueName: \"kubernetes.io/projected/6f35a6f5-6bdc-44cc-9314-8d0f90e1edfc-kube-api-access-t9bfs\") pod \"console-operator-58897d9998-7zd2w\" (UID: \"6f35a6f5-6bdc-44cc-9314-8d0f90e1edfc\") " pod="openshift-console-operator/console-operator-58897d9998-7zd2w" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.812802 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/d04e099e-931d-4fe3-9d85-196a1d40ebd8-default-certificate\") pod \"router-default-5444994796-hsngh\" (UID: \"d04e099e-931d-4fe3-9d85-196a1d40ebd8\") " pod="openshift-ingress/router-default-5444994796-hsngh" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.812818 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h6fsx\" (UniqueName: \"kubernetes.io/projected/d04e099e-931d-4fe3-9d85-196a1d40ebd8-kube-api-access-h6fsx\") pod \"router-default-5444994796-hsngh\" (UID: \"d04e099e-931d-4fe3-9d85-196a1d40ebd8\") " pod="openshift-ingress/router-default-5444994796-hsngh" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.812833 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4b8139c8-66be-4f40-a084-aa26d58554bb-serving-cert\") pod \"controller-manager-879f6c89f-gdj2w\" (UID: \"4b8139c8-66be-4f40-a084-aa26d58554bb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gdj2w" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.812851 4701 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.812871 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/dd5d82de-78d8-4e8e-93f3-fe6ede598dce-srv-cert\") pod \"catalog-operator-68c6474976-s9t62\" (UID: \"dd5d82de-78d8-4e8e-93f3-fe6ede598dce\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s9t62" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.812887 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qr8jm\" (UniqueName: \"kubernetes.io/projected/6469e01b-cfc6-4ec9-87de-29c6eeee136f-kube-api-access-qr8jm\") pod \"control-plane-machine-set-operator-78cbb6b69f-l7w9b\" (UID: \"6469e01b-cfc6-4ec9-87de-29c6eeee136f\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l7w9b" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.812901 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/d04e099e-931d-4fe3-9d85-196a1d40ebd8-stats-auth\") pod \"router-default-5444994796-hsngh\" (UID: \"d04e099e-931d-4fe3-9d85-196a1d40ebd8\") " pod="openshift-ingress/router-default-5444994796-hsngh" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.812916 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/12c6bbad-70a1-41c6-a818-d9ec535873e3-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-pz8rd\" (UID: \"12c6bbad-70a1-41c6-a818-d9ec535873e3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pz8rd" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.812932 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-trusted-ca\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.812946 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/dd5d82de-78d8-4e8e-93f3-fe6ede598dce-profile-collector-cert\") pod \"catalog-operator-68c6474976-s9t62\" (UID: \"dd5d82de-78d8-4e8e-93f3-fe6ede598dce\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s9t62" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.812961 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sx95c\" (UniqueName: \"kubernetes.io/projected/12c6bbad-70a1-41c6-a818-d9ec535873e3-kube-api-access-sx95c\") pod \"cluster-image-registry-operator-dc59b4c8b-pz8rd\" (UID: \"12c6bbad-70a1-41c6-a818-d9ec535873e3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pz8rd" Nov 21 
19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.812976 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/9d183ad2-4424-442f-a662-3572267b54fd-etcd-ca\") pod \"etcd-operator-b45778765-gzwfn\" (UID: \"9d183ad2-4424-442f-a662-3572267b54fd\") " pod="openshift-etcd-operator/etcd-operator-b45778765-gzwfn" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.812998 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/12c6bbad-70a1-41c6-a818-d9ec535873e3-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-pz8rd\" (UID: \"12c6bbad-70a1-41c6-a818-d9ec535873e3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pz8rd" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.813014 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-whtlr\" (UniqueName: \"kubernetes.io/projected/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-kube-api-access-whtlr\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.813029 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n7ppl\" (UniqueName: \"kubernetes.io/projected/dd5d82de-78d8-4e8e-93f3-fe6ede598dce-kube-api-access-n7ppl\") pod \"catalog-operator-68c6474976-s9t62\" (UID: \"dd5d82de-78d8-4e8e-93f3-fe6ede598dce\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s9t62" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.813045 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/9d183ad2-4424-442f-a662-3572267b54fd-etcd-service-ca\") pod \"etcd-operator-b45778765-gzwfn\" (UID: \"9d183ad2-4424-442f-a662-3572267b54fd\") " pod="openshift-etcd-operator/etcd-operator-b45778765-gzwfn" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.813060 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bfe14e44-05d7-460b-b4aa-462d435e8c62-config\") pod \"kube-apiserver-operator-766d6c64bb-zwplq\" (UID: \"bfe14e44-05d7-460b-b4aa-462d435e8c62\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-zwplq" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.813075 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dzpsn\" (UniqueName: \"kubernetes.io/projected/9d183ad2-4424-442f-a662-3572267b54fd-kube-api-access-dzpsn\") pod \"etcd-operator-b45778765-gzwfn\" (UID: \"9d183ad2-4424-442f-a662-3572267b54fd\") " pod="openshift-etcd-operator/etcd-operator-b45778765-gzwfn" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.813108 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/7ae80b08-d9c6-48e0-8fee-ce6cbff3b7e3-bound-sa-token\") pod \"ingress-operator-5b745b69d9-zfhdw\" (UID: \"7ae80b08-d9c6-48e0-8fee-ce6cbff3b7e3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zfhdw" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.813123 4701 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jrbj5\" (UniqueName: \"kubernetes.io/projected/4b8139c8-66be-4f40-a084-aa26d58554bb-kube-api-access-jrbj5\") pod \"controller-manager-879f6c89f-gdj2w\" (UID: \"4b8139c8-66be-4f40-a084-aa26d58554bb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gdj2w" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.813137 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6f35a6f5-6bdc-44cc-9314-8d0f90e1edfc-serving-cert\") pod \"console-operator-58897d9998-7zd2w\" (UID: \"6f35a6f5-6bdc-44cc-9314-8d0f90e1edfc\") " pod="openshift-console-operator/console-operator-58897d9998-7zd2w" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.813185 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ca30ea3d-b3d5-478c-a154-284e721664d7-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-n6dgf\" (UID: \"ca30ea3d-b3d5-478c-a154-284e721664d7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-n6dgf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.813221 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-registry-tls\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.813236 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/9d183ad2-4424-442f-a662-3572267b54fd-etcd-client\") pod \"etcd-operator-b45778765-gzwfn\" (UID: \"9d183ad2-4424-442f-a662-3572267b54fd\") " pod="openshift-etcd-operator/etcd-operator-b45778765-gzwfn" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.813251 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d04e099e-931d-4fe3-9d85-196a1d40ebd8-service-ca-bundle\") pod \"router-default-5444994796-hsngh\" (UID: \"d04e099e-931d-4fe3-9d85-196a1d40ebd8\") " pod="openshift-ingress/router-default-5444994796-hsngh" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.813269 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bfe14e44-05d7-460b-b4aa-462d435e8c62-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-zwplq\" (UID: \"bfe14e44-05d7-460b-b4aa-462d435e8c62\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-zwplq" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.813294 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f35a6f5-6bdc-44cc-9314-8d0f90e1edfc-config\") pod \"console-operator-58897d9998-7zd2w\" (UID: \"6f35a6f5-6bdc-44cc-9314-8d0f90e1edfc\") " pod="openshift-console-operator/console-operator-58897d9998-7zd2w" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.813310 4701 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-bound-sa-token\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.813326 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rxnqh\" (UniqueName: \"kubernetes.io/projected/7ae80b08-d9c6-48e0-8fee-ce6cbff3b7e3-kube-api-access-rxnqh\") pod \"ingress-operator-5b745b69d9-zfhdw\" (UID: \"7ae80b08-d9c6-48e0-8fee-ce6cbff3b7e3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zfhdw" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.813341 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6f35a6f5-6bdc-44cc-9314-8d0f90e1edfc-trusted-ca\") pod \"console-operator-58897d9998-7zd2w\" (UID: \"6f35a6f5-6bdc-44cc-9314-8d0f90e1edfc\") " pod="openshift-console-operator/console-operator-58897d9998-7zd2w" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.813354 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-ca-trust-extracted\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.813368 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d183ad2-4424-442f-a662-3572267b54fd-config\") pod \"etcd-operator-b45778765-gzwfn\" (UID: \"9d183ad2-4424-442f-a662-3572267b54fd\") " pod="openshift-etcd-operator/etcd-operator-b45778765-gzwfn" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.813385 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/12c6bbad-70a1-41c6-a818-d9ec535873e3-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-pz8rd\" (UID: \"12c6bbad-70a1-41c6-a818-d9ec535873e3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pz8rd" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.813398 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d04e099e-931d-4fe3-9d85-196a1d40ebd8-metrics-certs\") pod \"router-default-5444994796-hsngh\" (UID: \"d04e099e-931d-4fe3-9d85-196a1d40ebd8\") " pod="openshift-ingress/router-default-5444994796-hsngh" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.813414 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bfe14e44-05d7-460b-b4aa-462d435e8c62-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-zwplq\" (UID: \"bfe14e44-05d7-460b-b4aa-462d435e8c62\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-zwplq" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.813438 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/4b8139c8-66be-4f40-a084-aa26d58554bb-client-ca\") pod \"controller-manager-879f6c89f-gdj2w\" (UID: \"4b8139c8-66be-4f40-a084-aa26d58554bb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gdj2w" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.813454 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ca30ea3d-b3d5-478c-a154-284e721664d7-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-n6dgf\" (UID: \"ca30ea3d-b3d5-478c-a154-284e721664d7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-n6dgf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.813469 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d183ad2-4424-442f-a662-3572267b54fd-serving-cert\") pod \"etcd-operator-b45778765-gzwfn\" (UID: \"9d183ad2-4424-442f-a662-3572267b54fd\") " pod="openshift-etcd-operator/etcd-operator-b45778765-gzwfn" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.813486 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4b8139c8-66be-4f40-a084-aa26d58554bb-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-gdj2w\" (UID: \"4b8139c8-66be-4f40-a084-aa26d58554bb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gdj2w" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.813502 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7ae80b08-d9c6-48e0-8fee-ce6cbff3b7e3-trusted-ca\") pod \"ingress-operator-5b745b69d9-zfhdw\" (UID: \"7ae80b08-d9c6-48e0-8fee-ce6cbff3b7e3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zfhdw" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.813516 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8a478166-ab28-4186-a9c2-f079c8b2f2d2-serving-cert\") pod \"openshift-config-operator-7777fb866f-zsvsv\" (UID: \"8a478166-ab28-4186-a9c2-f079c8b2f2d2\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zsvsv" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.813549 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4zkb9\" (UniqueName: \"kubernetes.io/projected/8a478166-ab28-4186-a9c2-f079c8b2f2d2-kube-api-access-4zkb9\") pod \"openshift-config-operator-7777fb866f-zsvsv\" (UID: \"8a478166-ab28-4186-a9c2-f079c8b2f2d2\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zsvsv" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.813576 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-installation-pull-secrets\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.814519 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-gzwfn"] Nov 21 19:03:18 crc kubenswrapper[4701]: 
I1121 19:03:18.814570 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-7zd2w"] Nov 21 19:03:18 crc kubenswrapper[4701]: E1121 19:03:18.815464 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:19.315450234 +0000 UTC m=+90.100590261 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.817670 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ca30ea3d-b3d5-478c-a154-284e721664d7-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-n6dgf\" (UID: \"ca30ea3d-b3d5-478c-a154-284e721664d7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-n6dgf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.818892 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7ae80b08-d9c6-48e0-8fee-ce6cbff3b7e3-trusted-ca\") pod \"ingress-operator-5b745b69d9-zfhdw\" (UID: \"7ae80b08-d9c6-48e0-8fee-ce6cbff3b7e3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zfhdw" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.822677 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.822940 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.830341 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-8gkzf"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.830392 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-zsvsv"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.832486 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/7ae80b08-d9c6-48e0-8fee-ce6cbff3b7e3-metrics-tls\") pod \"ingress-operator-5b745b69d9-zfhdw\" (UID: \"7ae80b08-d9c6-48e0-8fee-ce6cbff3b7e3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zfhdw" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.833439 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ca30ea3d-b3d5-478c-a154-284e721664d7-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-n6dgf\" (UID: \"ca30ea3d-b3d5-478c-a154-284e721664d7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-n6dgf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.835061 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7wcwn"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.836295 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395860-72qpb"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.837502 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-scrql"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.838539 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-wzsrk"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.839949 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7fs2f"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.841631 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-v5bsv"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.842468 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-2hzx9"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.843319 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2hzx9" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.844160 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.857561 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-75d7z"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.857668 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s9t62"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.857871 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pz8rd"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.860114 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.862686 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-n6dgf"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.863944 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-g7gbn"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.864879 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-twqqt"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.865931 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-zmch7"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.867347 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-zwplq"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.870284 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mqbkq"] Nov 21 19:03:18 crc 
kubenswrapper[4701]: I1121 19:03:18.870305 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-zt9ht"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.871375 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-jbq9k"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.872701 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-cwddx"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.874272 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-v6w6b"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.875411 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-76rvr"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.876601 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l7w9b"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.877757 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.877871 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-zfhdw"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.879018 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-w4fcr"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.880675 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-hbp5b"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.882058 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-rfbjh"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.883610 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-2b8wr"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.885165 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-z6z69"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.890359 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4kqhw"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.894412 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-9d774"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.897435 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-9d774" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.901728 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nzzw5"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.903305 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-2hzx9"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.905829 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.910871 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-f2jgv"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.914000 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-twmdp"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.914030 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9rlms"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.914118 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-f2jgv" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.914684 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:18 crc kubenswrapper[4701]: E1121 19:03:18.914959 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:19.414928376 +0000 UTC m=+90.200068403 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.915184 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f35a6f5-6bdc-44cc-9314-8d0f90e1edfc-config\") pod \"console-operator-58897d9998-7zd2w\" (UID: \"6f35a6f5-6bdc-44cc-9314-8d0f90e1edfc\") " pod="openshift-console-operator/console-operator-58897d9998-7zd2w" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.915280 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/f741f928-61fd-41d5-b8c8-879a4744fa2e-images\") pod \"machine-api-operator-5694c8668f-z6z69\" (UID: \"f741f928-61fd-41d5-b8c8-879a4744fa2e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-z6z69" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.915324 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1da8a1e4-422a-45dc-aa36-ae559ea1dc14-auth-proxy-config\") pod \"machine-approver-56656f9798-hcwlb\" (UID: \"1da8a1e4-422a-45dc-aa36-ae559ea1dc14\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hcwlb" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.915353 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4167d110-2211-4862-af3d-b6b4a88a0bfd-client-ca\") pod \"route-controller-manager-6576b87f9c-75d7z\" (UID: \"4167d110-2211-4862-af3d-b6b4a88a0bfd\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-75d7z" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.915386 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d04e099e-931d-4fe3-9d85-196a1d40ebd8-metrics-certs\") pod \"router-default-5444994796-hsngh\" (UID: \"d04e099e-931d-4fe3-9d85-196a1d40ebd8\") " pod="openshift-ingress/router-default-5444994796-hsngh" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.915414 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b3e75990-afff-41bb-a78e-3d04223bbb6c-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-v6w6b\" (UID: \"b3e75990-afff-41bb-a78e-3d04223bbb6c\") " pod="openshift-marketplace/marketplace-operator-79b997595-v6w6b" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.915444 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/12c6bbad-70a1-41c6-a818-d9ec535873e3-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-pz8rd\" (UID: \"12c6bbad-70a1-41c6-a818-d9ec535873e3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pz8rd" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.915473 4701 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bfe14e44-05d7-460b-b4aa-462d435e8c62-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-zwplq\" (UID: \"bfe14e44-05d7-460b-b4aa-462d435e8c62\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-zwplq" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.915499 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/67a65cf0-6dcb-4730-a43b-0be90f5c8a93-serving-cert\") pod \"apiserver-7bbb656c7d-g7gbn\" (UID: \"67a65cf0-6dcb-4730-a43b-0be90f5c8a93\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-g7gbn" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.915525 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f9fb2aa-8dfa-47d7-8e35-3b6267d85ef7-config\") pod \"openshift-apiserver-operator-796bbdcf4f-nzzw5\" (UID: \"4f9fb2aa-8dfa-47d7-8e35-3b6267d85ef7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nzzw5" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.915571 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/56606974-5260-4587-b1cd-17e7ad12868b-proxy-tls\") pod \"machine-config-controller-84d6567774-76rvr\" (UID: \"56606974-5260-4587-b1cd-17e7ad12868b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-76rvr" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.915601 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d183ad2-4424-442f-a662-3572267b54fd-serving-cert\") pod \"etcd-operator-b45778765-gzwfn\" (UID: \"9d183ad2-4424-442f-a662-3572267b54fd\") " pod="openshift-etcd-operator/etcd-operator-b45778765-gzwfn" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.915629 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/57fc3016-4534-4be7-a281-b353f13830b9-config\") pod \"service-ca-operator-777779d784-jbq9k\" (UID: \"57fc3016-4534-4be7-a281-b353f13830b9\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jbq9k" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.915658 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.915689 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4b8139c8-66be-4f40-a084-aa26d58554bb-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-gdj2w\" (UID: \"4b8139c8-66be-4f40-a084-aa26d58554bb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gdj2w" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.915718 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/8a478166-ab28-4186-a9c2-f079c8b2f2d2-serving-cert\") pod \"openshift-config-operator-7777fb866f-zsvsv\" (UID: \"8a478166-ab28-4186-a9c2-f079c8b2f2d2\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zsvsv" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.915745 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f7def574-9941-4933-83df-3f20df5797d4-trusted-ca-bundle\") pod \"console-f9d7485db-cwddx\" (UID: \"f7def574-9941-4933-83df-3f20df5797d4\") " pod="openshift-console/console-f9d7485db-cwddx" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.915778 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/601c0380-cc9d-4363-94be-92be6aeb94ac-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-4kqhw\" (UID: \"601c0380-cc9d-4363-94be-92be6aeb94ac\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4kqhw" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.915804 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.915808 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aaed9de5-8fa2-4493-ab19-2a79c17c6241-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-v5bsv\" (UID: \"aaed9de5-8fa2-4493-ab19-2a79c17c6241\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-v5bsv" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.915842 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mtm8m\" (UniqueName: \"kubernetes.io/projected/b3e75990-afff-41bb-a78e-3d04223bbb6c-kube-api-access-mtm8m\") pod \"marketplace-operator-79b997595-v6w6b\" (UID: \"b3e75990-afff-41bb-a78e-3d04223bbb6c\") " pod="openshift-marketplace/marketplace-operator-79b997595-v6w6b" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.915871 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b8139c8-66be-4f40-a084-aa26d58554bb-config\") pod \"controller-manager-879f6c89f-gdj2w\" (UID: \"4b8139c8-66be-4f40-a084-aa26d58554bb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gdj2w" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.915908 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-registry-certificates\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.915953 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f7def574-9941-4933-83df-3f20df5797d4-service-ca\") pod \"console-f9d7485db-cwddx\" (UID: \"f7def574-9941-4933-83df-3f20df5797d4\") " pod="openshift-console/console-f9d7485db-cwddx" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.915999 4701 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pzz8d\" (UniqueName: \"kubernetes.io/projected/276e2cb3-e02e-4122-b10b-a454198b7954-kube-api-access-pzz8d\") pod \"collect-profiles-29395860-72qpb\" (UID: \"276e2cb3-e02e-4122-b10b-a454198b7954\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395860-72qpb" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.916059 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a2f5b911-dc9c-4009-a4b0-da201a34f156-audit-dir\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.916129 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/276e2cb3-e02e-4122-b10b-a454198b7954-config-volume\") pod \"collect-profiles-29395860-72qpb\" (UID: \"276e2cb3-e02e-4122-b10b-a454198b7954\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395860-72qpb" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.916178 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qb2v5\" (UniqueName: \"kubernetes.io/projected/7e4a0b68-874d-4da2-9286-12b84e37e090-kube-api-access-qb2v5\") pod \"package-server-manager-789f6589d5-twqqt\" (UID: \"7e4a0b68-874d-4da2-9286-12b84e37e090\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-twqqt" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.916245 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rt2mw\" (UniqueName: \"kubernetes.io/projected/a2f5b911-dc9c-4009-a4b0-da201a34f156-kube-api-access-rt2mw\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.916277 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h6fsx\" (UniqueName: \"kubernetes.io/projected/d04e099e-931d-4fe3-9d85-196a1d40ebd8-kube-api-access-h6fsx\") pod \"router-default-5444994796-hsngh\" (UID: \"d04e099e-931d-4fe3-9d85-196a1d40ebd8\") " pod="openshift-ingress/router-default-5444994796-hsngh" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.916309 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4b8139c8-66be-4f40-a084-aa26d58554bb-serving-cert\") pod \"controller-manager-879f6c89f-gdj2w\" (UID: \"4b8139c8-66be-4f40-a084-aa26d58554bb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gdj2w" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.916339 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.916367 4701 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/7e4a0b68-874d-4da2-9286-12b84e37e090-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-twqqt\" (UID: \"7e4a0b68-874d-4da2-9286-12b84e37e090\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-twqqt" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.916408 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4sw8h\" (UniqueName: \"kubernetes.io/projected/f7def574-9941-4933-83df-3f20df5797d4-kube-api-access-4sw8h\") pod \"console-f9d7485db-cwddx\" (UID: \"f7def574-9941-4933-83df-3f20df5797d4\") " pod="openshift-console/console-f9d7485db-cwddx" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.916434 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a2f5b911-dc9c-4009-a4b0-da201a34f156-audit-policies\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.916465 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/12c6bbad-70a1-41c6-a818-d9ec535873e3-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-pz8rd\" (UID: \"12c6bbad-70a1-41c6-a818-d9ec535873e3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pz8rd" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.916524 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4167d110-2211-4862-af3d-b6b4a88a0bfd-serving-cert\") pod \"route-controller-manager-6576b87f9c-75d7z\" (UID: \"4167d110-2211-4862-af3d-b6b4a88a0bfd\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-75d7z" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.916750 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-trusted-ca\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.916783 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/9d183ad2-4424-442f-a662-3572267b54fd-etcd-ca\") pod \"etcd-operator-b45778765-gzwfn\" (UID: \"9d183ad2-4424-442f-a662-3572267b54fd\") " pod="openshift-etcd-operator/etcd-operator-b45778765-gzwfn" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.916811 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.916811 4701 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f35a6f5-6bdc-44cc-9314-8d0f90e1edfc-config\") pod \"console-operator-58897d9998-7zd2w\" (UID: \"6f35a6f5-6bdc-44cc-9314-8d0f90e1edfc\") " pod="openshift-console-operator/console-operator-58897d9998-7zd2w" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.916840 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-whtlr\" (UniqueName: \"kubernetes.io/projected/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-kube-api-access-whtlr\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.916912 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n7ppl\" (UniqueName: \"kubernetes.io/projected/dd5d82de-78d8-4e8e-93f3-fe6ede598dce-kube-api-access-n7ppl\") pod \"catalog-operator-68c6474976-s9t62\" (UID: \"dd5d82de-78d8-4e8e-93f3-fe6ede598dce\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s9t62" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.917031 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/12c6bbad-70a1-41c6-a818-d9ec535873e3-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-pz8rd\" (UID: \"12c6bbad-70a1-41c6-a818-d9ec535873e3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pz8rd" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.918809 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.918959 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dzpsn\" (UniqueName: \"kubernetes.io/projected/9d183ad2-4424-442f-a662-3572267b54fd-kube-api-access-dzpsn\") pod \"etcd-operator-b45778765-gzwfn\" (UID: \"9d183ad2-4424-442f-a662-3572267b54fd\") " pod="openshift-etcd-operator/etcd-operator-b45778765-gzwfn" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.919065 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b3e75990-afff-41bb-a78e-3d04223bbb6c-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-v6w6b\" (UID: \"b3e75990-afff-41bb-a78e-3d04223bbb6c\") " pod="openshift-marketplace/marketplace-operator-79b997595-v6w6b" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.919106 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f7def574-9941-4933-83df-3f20df5797d4-oauth-serving-cert\") pod \"console-f9d7485db-cwddx\" (UID: \"f7def574-9941-4933-83df-3f20df5797d4\") " pod="openshift-console/console-f9d7485db-cwddx" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.919155 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jrbj5\" (UniqueName: \"kubernetes.io/projected/4b8139c8-66be-4f40-a084-aa26d58554bb-kube-api-access-jrbj5\") pod \"controller-manager-879f6c89f-gdj2w\" (UID: \"4b8139c8-66be-4f40-a084-aa26d58554bb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gdj2w" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.919252 4701 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6f35a6f5-6bdc-44cc-9314-8d0f90e1edfc-serving-cert\") pod \"console-operator-58897d9998-7zd2w\" (UID: \"6f35a6f5-6bdc-44cc-9314-8d0f90e1edfc\") " pod="openshift-console-operator/console-operator-58897d9998-7zd2w" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.919292 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w9tk4\" (UniqueName: \"kubernetes.io/projected/3dc5a393-ee87-4a1c-b786-9523a05de343-kube-api-access-w9tk4\") pod \"dns-operator-744455d44c-2b8wr\" (UID: \"3dc5a393-ee87-4a1c-b786-9523a05de343\") " pod="openshift-dns-operator/dns-operator-744455d44c-2b8wr" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.919335 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.919366 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f7def574-9941-4933-83df-3f20df5797d4-console-serving-cert\") pod \"console-f9d7485db-cwddx\" (UID: \"f7def574-9941-4933-83df-3f20df5797d4\") " pod="openshift-console/console-f9d7485db-cwddx" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.919387 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.919404 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.918869 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/9d183ad2-4424-442f-a662-3572267b54fd-etcd-ca\") pod \"etcd-operator-b45778765-gzwfn\" (UID: \"9d183ad2-4424-442f-a662-3572267b54fd\") " pod="openshift-etcd-operator/etcd-operator-b45778765-gzwfn" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.920147 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d183ad2-4424-442f-a662-3572267b54fd-serving-cert\") pod \"etcd-operator-b45778765-gzwfn\" (UID: \"9d183ad2-4424-442f-a662-3572267b54fd\") " pod="openshift-etcd-operator/etcd-operator-b45778765-gzwfn" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.920554 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/9c46cea4-80aa-4a56-9370-0b7f5331c1ee-auth-proxy-config\") pod \"machine-config-operator-74547568cd-w4fcr\" (UID: \"9c46cea4-80aa-4a56-9370-0b7f5331c1ee\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-w4fcr" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.920591 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bfe14e44-05d7-460b-b4aa-462d435e8c62-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-zwplq\" (UID: \"bfe14e44-05d7-460b-b4aa-462d435e8c62\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-zwplq" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.920622 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4b8139c8-66be-4f40-a084-aa26d58554bb-serving-cert\") pod \"controller-manager-879f6c89f-gdj2w\" (UID: \"4b8139c8-66be-4f40-a084-aa26d58554bb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gdj2w" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.920629 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d04e099e-931d-4fe3-9d85-196a1d40ebd8-service-ca-bundle\") pod \"router-default-5444994796-hsngh\" (UID: \"d04e099e-931d-4fe3-9d85-196a1d40ebd8\") " pod="openshift-ingress/router-default-5444994796-hsngh" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.920679 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/67a65cf0-6dcb-4730-a43b-0be90f5c8a93-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-g7gbn\" (UID: \"67a65cf0-6dcb-4730-a43b-0be90f5c8a93\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-g7gbn" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.920722 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wtj8l\" (UniqueName: \"kubernetes.io/projected/9c46cea4-80aa-4a56-9370-0b7f5331c1ee-kube-api-access-wtj8l\") pod \"machine-config-operator-74547568cd-w4fcr\" (UID: \"9c46cea4-80aa-4a56-9370-0b7f5331c1ee\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-w4fcr" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.920758 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/12c6bbad-70a1-41c6-a818-d9ec535873e3-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-pz8rd\" (UID: \"12c6bbad-70a1-41c6-a818-d9ec535873e3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pz8rd" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.920842 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.920916 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: 
\"kubernetes.io/projected/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-bound-sa-token\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.921042 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/67a65cf0-6dcb-4730-a43b-0be90f5c8a93-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-g7gbn\" (UID: \"67a65cf0-6dcb-4730-a43b-0be90f5c8a93\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-g7gbn" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.921080 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4f9fb2aa-8dfa-47d7-8e35-3b6267d85ef7-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-nzzw5\" (UID: \"4f9fb2aa-8dfa-47d7-8e35-3b6267d85ef7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nzzw5" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.921132 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/f7def574-9941-4933-83df-3f20df5797d4-console-config\") pod \"console-f9d7485db-cwddx\" (UID: \"f7def574-9941-4933-83df-3f20df5797d4\") " pod="openshift-console/console-f9d7485db-cwddx" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.921298 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b8139c8-66be-4f40-a084-aa26d58554bb-config\") pod \"controller-manager-879f6c89f-gdj2w\" (UID: \"4b8139c8-66be-4f40-a084-aa26d58554bb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gdj2w" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.921397 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d04e099e-931d-4fe3-9d85-196a1d40ebd8-service-ca-bundle\") pod \"router-default-5444994796-hsngh\" (UID: \"d04e099e-931d-4fe3-9d85-196a1d40ebd8\") " pod="openshift-ingress/router-default-5444994796-hsngh" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.921474 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6f35a6f5-6bdc-44cc-9314-8d0f90e1edfc-trusted-ca\") pod \"console-operator-58897d9998-7zd2w\" (UID: \"6f35a6f5-6bdc-44cc-9314-8d0f90e1edfc\") " pod="openshift-console-operator/console-operator-58897d9998-7zd2w" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.921670 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/1c076709-6bae-4b64-9169-2aed68c813cd-tmpfs\") pod \"packageserver-d55dfcdfc-7wcwn\" (UID: \"1c076709-6bae-4b64-9169-2aed68c813cd\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7wcwn" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.921751 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d04e099e-931d-4fe3-9d85-196a1d40ebd8-metrics-certs\") pod \"router-default-5444994796-hsngh\" (UID: \"d04e099e-931d-4fe3-9d85-196a1d40ebd8\") " pod="openshift-ingress/router-default-5444994796-hsngh" Nov 21 19:03:18 crc 
kubenswrapper[4701]: I1121 19:03:18.921795 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/1da8a1e4-422a-45dc-aa36-ae559ea1dc14-machine-approver-tls\") pod \"machine-approver-56656f9798-hcwlb\" (UID: \"1da8a1e4-422a-45dc-aa36-ae559ea1dc14\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hcwlb" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.921895 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8b089186-0669-456a-bea2-60b001261161-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-rfbjh\" (UID: \"8b089186-0669-456a-bea2-60b001261161\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rfbjh" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.921947 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gk6v9\" (UniqueName: \"kubernetes.io/projected/5d7aff3f-cf94-411b-b23b-c91f58cdc2f6-kube-api-access-gk6v9\") pod \"downloads-7954f5f757-hbp5b\" (UID: \"5d7aff3f-cf94-411b-b23b-c91f58cdc2f6\") " pod="openshift-console/downloads-7954f5f757-hbp5b" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.921996 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-ca-trust-extracted\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.922057 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d183ad2-4424-442f-a662-3572267b54fd-config\") pod \"etcd-operator-b45778765-gzwfn\" (UID: \"9d183ad2-4424-442f-a662-3572267b54fd\") " pod="openshift-etcd-operator/etcd-operator-b45778765-gzwfn" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.922110 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-trusted-ca\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.922253 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/67a65cf0-6dcb-4730-a43b-0be90f5c8a93-audit-dir\") pod \"apiserver-7bbb656c7d-g7gbn\" (UID: \"67a65cf0-6dcb-4730-a43b-0be90f5c8a93\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-g7gbn" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.922292 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.922387 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" 
(UniqueName: \"kubernetes.io/configmap/4b8139c8-66be-4f40-a084-aa26d58554bb-client-ca\") pod \"controller-manager-879f6c89f-gdj2w\" (UID: \"4b8139c8-66be-4f40-a084-aa26d58554bb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gdj2w" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.922467 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4167d110-2211-4862-af3d-b6b4a88a0bfd-config\") pod \"route-controller-manager-6576b87f9c-75d7z\" (UID: \"4167d110-2211-4862-af3d-b6b4a88a0bfd\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-75d7z" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.922608 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8jhcz\" (UniqueName: \"kubernetes.io/projected/2ea15e3b-bc7e-491a-8cd7-4275d33abf23-kube-api-access-8jhcz\") pod \"service-ca-9c57cc56f-zmch7\" (UID: \"2ea15e3b-bc7e-491a-8cd7-4275d33abf23\") " pod="openshift-service-ca/service-ca-9c57cc56f-zmch7" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.922691 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4zkb9\" (UniqueName: \"kubernetes.io/projected/8a478166-ab28-4186-a9c2-f079c8b2f2d2-kube-api-access-4zkb9\") pod \"openshift-config-operator-7777fb866f-zsvsv\" (UID: \"8a478166-ab28-4186-a9c2-f079c8b2f2d2\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zsvsv" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.922770 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.922822 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/2ea15e3b-bc7e-491a-8cd7-4275d33abf23-signing-cabundle\") pod \"service-ca-9c57cc56f-zmch7\" (UID: \"2ea15e3b-bc7e-491a-8cd7-4275d33abf23\") " pod="openshift-service-ca/service-ca-9c57cc56f-zmch7" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.922879 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-ca-trust-extracted\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.922919 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/67a65cf0-6dcb-4730-a43b-0be90f5c8a93-audit-policies\") pod \"apiserver-7bbb656c7d-g7gbn\" (UID: \"67a65cf0-6dcb-4730-a43b-0be90f5c8a93\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-g7gbn" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.922908 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d183ad2-4424-442f-a662-3572267b54fd-config\") pod \"etcd-operator-b45778765-gzwfn\" 
(UID: \"9d183ad2-4424-442f-a662-3572267b54fd\") " pod="openshift-etcd-operator/etcd-operator-b45778765-gzwfn" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.922994 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-installation-pull-secrets\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.923150 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zq9bv\" (UniqueName: \"kubernetes.io/projected/67a65cf0-6dcb-4730-a43b-0be90f5c8a93-kube-api-access-zq9bv\") pod \"apiserver-7bbb656c7d-g7gbn\" (UID: \"67a65cf0-6dcb-4730-a43b-0be90f5c8a93\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-g7gbn" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.923241 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8b089186-0669-456a-bea2-60b001261161-service-ca-bundle\") pod \"authentication-operator-69f744f599-rfbjh\" (UID: \"8b089186-0669-456a-bea2-60b001261161\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rfbjh" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.923393 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/cfa749f0-83ce-4ba6-8a3e-e43257bdb907-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-scrql\" (UID: \"cfa749f0-83ce-4ba6-8a3e-e43257bdb907\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-scrql" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.923466 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/9c46cea4-80aa-4a56-9370-0b7f5331c1ee-images\") pod \"machine-config-operator-74547568cd-w4fcr\" (UID: \"9c46cea4-80aa-4a56-9370-0b7f5331c1ee\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-w4fcr" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.923614 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-9d774"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.923661 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/56606974-5260-4587-b1cd-17e7ad12868b-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-76rvr\" (UID: \"56606974-5260-4587-b1cd-17e7ad12868b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-76rvr" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.923714 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/3dc5a393-ee87-4a1c-b786-9523a05de343-metrics-tls\") pod \"dns-operator-744455d44c-2b8wr\" (UID: \"3dc5a393-ee87-4a1c-b786-9523a05de343\") " pod="openshift-dns-operator/dns-operator-744455d44c-2b8wr" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.923743 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: 
\"kubernetes.io/secret/276e2cb3-e02e-4122-b10b-a454198b7954-secret-volume\") pod \"collect-profiles-29395860-72qpb\" (UID: \"276e2cb3-e02e-4122-b10b-a454198b7954\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395860-72qpb" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.923787 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6f9560d6-8ab5-46f8-bd69-7bca42610547-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-9rlms\" (UID: \"6f9560d6-8ab5-46f8-bd69-7bca42610547\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9rlms" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.923815 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b8kw2\" (UniqueName: \"kubernetes.io/projected/8b089186-0669-456a-bea2-60b001261161-kube-api-access-b8kw2\") pod \"authentication-operator-69f744f599-rfbjh\" (UID: \"8b089186-0669-456a-bea2-60b001261161\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rfbjh" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.923848 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2fhp8\" (UniqueName: \"kubernetes.io/projected/cfa749f0-83ce-4ba6-8a3e-e43257bdb907-kube-api-access-2fhp8\") pod \"multus-admission-controller-857f4d67dd-scrql\" (UID: \"cfa749f0-83ce-4ba6-8a3e-e43257bdb907\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-scrql" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.923928 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/57fc3016-4534-4be7-a281-b353f13830b9-serving-cert\") pod \"service-ca-operator-777779d784-jbq9k\" (UID: \"57fc3016-4534-4be7-a281-b353f13830b9\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jbq9k" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.923965 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hzkk6\" (UniqueName: \"kubernetes.io/projected/4f9fb2aa-8dfa-47d7-8e35-3b6267d85ef7-kube-api-access-hzkk6\") pod \"openshift-apiserver-operator-796bbdcf4f-nzzw5\" (UID: \"4f9fb2aa-8dfa-47d7-8e35-3b6267d85ef7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nzzw5" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.923985 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1c076709-6bae-4b64-9169-2aed68c813cd-webhook-cert\") pod \"packageserver-d55dfcdfc-7wcwn\" (UID: \"1c076709-6bae-4b64-9169-2aed68c813cd\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7wcwn" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.924008 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l9dz7\" (UniqueName: \"kubernetes.io/projected/1da8a1e4-422a-45dc-aa36-ae559ea1dc14-kube-api-access-l9dz7\") pod \"machine-approver-56656f9798-hcwlb\" (UID: \"1da8a1e4-422a-45dc-aa36-ae559ea1dc14\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hcwlb" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.924029 4701 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6469e01b-cfc6-4ec9-87de-29c6eeee136f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-l7w9b\" (UID: \"6469e01b-cfc6-4ec9-87de-29c6eeee136f\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l7w9b" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.924053 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/8a478166-ab28-4186-a9c2-f079c8b2f2d2-available-featuregates\") pod \"openshift-config-operator-7777fb866f-zsvsv\" (UID: \"8a478166-ab28-4186-a9c2-f079c8b2f2d2\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zsvsv" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.924119 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t9bfs\" (UniqueName: \"kubernetes.io/projected/6f35a6f5-6bdc-44cc-9314-8d0f90e1edfc-kube-api-access-t9bfs\") pod \"console-operator-58897d9998-7zd2w\" (UID: \"6f35a6f5-6bdc-44cc-9314-8d0f90e1edfc\") " pod="openshift-console-operator/console-operator-58897d9998-7zd2w" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.924145 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/d04e099e-931d-4fe3-9d85-196a1d40ebd8-default-certificate\") pod \"router-default-5444994796-hsngh\" (UID: \"d04e099e-931d-4fe3-9d85-196a1d40ebd8\") " pod="openshift-ingress/router-default-5444994796-hsngh" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.924166 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/67a65cf0-6dcb-4730-a43b-0be90f5c8a93-etcd-client\") pod \"apiserver-7bbb656c7d-g7gbn\" (UID: \"67a65cf0-6dcb-4730-a43b-0be90f5c8a93\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-g7gbn" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.924189 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/9c46cea4-80aa-4a56-9370-0b7f5331c1ee-proxy-tls\") pod \"machine-config-operator-74547568cd-w4fcr\" (UID: \"9c46cea4-80aa-4a56-9370-0b7f5331c1ee\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-w4fcr" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.924225 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f7def574-9941-4933-83df-3f20df5797d4-console-oauth-config\") pod \"console-f9d7485db-cwddx\" (UID: \"f7def574-9941-4933-83df-3f20df5797d4\") " pod="openshift-console/console-f9d7485db-cwddx" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.924285 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/dd5d82de-78d8-4e8e-93f3-fe6ede598dce-srv-cert\") pod \"catalog-operator-68c6474976-s9t62\" (UID: \"dd5d82de-78d8-4e8e-93f3-fe6ede598dce\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s9t62" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.924309 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.924330 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8b089186-0669-456a-bea2-60b001261161-config\") pod \"authentication-operator-69f744f599-rfbjh\" (UID: \"8b089186-0669-456a-bea2-60b001261161\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rfbjh" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.924363 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qr8jm\" (UniqueName: \"kubernetes.io/projected/6469e01b-cfc6-4ec9-87de-29c6eeee136f-kube-api-access-qr8jm\") pod \"control-plane-machine-set-operator-78cbb6b69f-l7w9b\" (UID: \"6469e01b-cfc6-4ec9-87de-29c6eeee136f\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l7w9b" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.924382 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/d04e099e-931d-4fe3-9d85-196a1d40ebd8-stats-auth\") pod \"router-default-5444994796-hsngh\" (UID: \"d04e099e-931d-4fe3-9d85-196a1d40ebd8\") " pod="openshift-ingress/router-default-5444994796-hsngh" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.924401 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/2ea15e3b-bc7e-491a-8cd7-4275d33abf23-signing-key\") pod \"service-ca-9c57cc56f-zmch7\" (UID: \"2ea15e3b-bc7e-491a-8cd7-4275d33abf23\") " pod="openshift-service-ca/service-ca-9c57cc56f-zmch7" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.924541 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/8a478166-ab28-4186-a9c2-f079c8b2f2d2-available-featuregates\") pod \"openshift-config-operator-7777fb866f-zsvsv\" (UID: \"8a478166-ab28-4186-a9c2-f079c8b2f2d2\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zsvsv" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.924746 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6f35a6f5-6bdc-44cc-9314-8d0f90e1edfc-trusted-ca\") pod \"console-operator-58897d9998-7zd2w\" (UID: \"6f35a6f5-6bdc-44cc-9314-8d0f90e1edfc\") " pod="openshift-console-operator/console-operator-58897d9998-7zd2w" Nov 21 19:03:18 crc kubenswrapper[4701]: E1121 19:03:18.924987 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:19.424970105 +0000 UTC m=+90.210110132 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.925823 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a59af599-0d79-4301-8c37-e0e7189477ad-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-7fs2f\" (UID: \"a59af599-0d79-4301-8c37-e0e7189477ad\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7fs2f" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.926161 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-24j9g\" (UniqueName: \"kubernetes.io/projected/a59af599-0d79-4301-8c37-e0e7189477ad-kube-api-access-24j9g\") pod \"cluster-samples-operator-665b6dd947-7fs2f\" (UID: \"a59af599-0d79-4301-8c37-e0e7189477ad\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7fs2f" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.926168 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4b8139c8-66be-4f40-a084-aa26d58554bb-client-ca\") pod \"controller-manager-879f6c89f-gdj2w\" (UID: \"4b8139c8-66be-4f40-a084-aa26d58554bb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gdj2w" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.926238 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/601c0380-cc9d-4363-94be-92be6aeb94ac-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-4kqhw\" (UID: \"601c0380-cc9d-4363-94be-92be6aeb94ac\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4kqhw" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.926352 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/dd5d82de-78d8-4e8e-93f3-fe6ede598dce-profile-collector-cert\") pod \"catalog-operator-68c6474976-s9t62\" (UID: \"dd5d82de-78d8-4e8e-93f3-fe6ede598dce\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s9t62" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.926401 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sx95c\" (UniqueName: \"kubernetes.io/projected/12c6bbad-70a1-41c6-a818-d9ec535873e3-kube-api-access-sx95c\") pod \"cluster-image-registry-operator-dc59b4c8b-pz8rd\" (UID: \"12c6bbad-70a1-41c6-a818-d9ec535873e3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pz8rd" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.926434 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6f9560d6-8ab5-46f8-bd69-7bca42610547-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-9rlms\" (UID: \"6f9560d6-8ab5-46f8-bd69-7bca42610547\") " 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9rlms" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.926436 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6f35a6f5-6bdc-44cc-9314-8d0f90e1edfc-serving-cert\") pod \"console-operator-58897d9998-7zd2w\" (UID: \"6f35a6f5-6bdc-44cc-9314-8d0f90e1edfc\") " pod="openshift-console-operator/console-operator-58897d9998-7zd2w" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.926479 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kjvng\" (UniqueName: \"kubernetes.io/projected/4c931253-2864-49ef-a35a-0e7c04e2d75e-kube-api-access-kjvng\") pod \"migrator-59844c95c7-zt9ht\" (UID: \"4c931253-2864-49ef-a35a-0e7c04e2d75e\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-zt9ht" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.926511 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/92358a71-dd66-49c9-8cc2-83cf555207d4-srv-cert\") pod \"olm-operator-6b444d44fb-mqbkq\" (UID: \"92358a71-dd66-49c9-8cc2-83cf555207d4\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mqbkq" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.926643 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l7bpg\" (UniqueName: \"kubernetes.io/projected/92358a71-dd66-49c9-8cc2-83cf555207d4-kube-api-access-l7bpg\") pod \"olm-operator-6b444d44fb-mqbkq\" (UID: \"92358a71-dd66-49c9-8cc2-83cf555207d4\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mqbkq" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.926682 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/12c6bbad-70a1-41c6-a818-d9ec535873e3-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-pz8rd\" (UID: \"12c6bbad-70a1-41c6-a818-d9ec535873e3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pz8rd" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.926725 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aaed9de5-8fa2-4493-ab19-2a79c17c6241-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-v5bsv\" (UID: \"aaed9de5-8fa2-4493-ab19-2a79c17c6241\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-v5bsv" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.926746 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.926725 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8a478166-ab28-4186-a9c2-f079c8b2f2d2-serving-cert\") pod \"openshift-config-operator-7777fb866f-zsvsv\" (UID: \"8a478166-ab28-4186-a9c2-f079c8b2f2d2\") " 
pod="openshift-config-operator/openshift-config-operator-7777fb866f-zsvsv" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.926851 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/92358a71-dd66-49c9-8cc2-83cf555207d4-profile-collector-cert\") pod \"olm-operator-6b444d44fb-mqbkq\" (UID: \"92358a71-dd66-49c9-8cc2-83cf555207d4\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mqbkq" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.926898 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7pbvv\" (UniqueName: \"kubernetes.io/projected/aaed9de5-8fa2-4493-ab19-2a79c17c6241-kube-api-access-7pbvv\") pod \"openshift-controller-manager-operator-756b6f6bc6-v5bsv\" (UID: \"aaed9de5-8fa2-4493-ab19-2a79c17c6241\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-v5bsv" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.926960 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.927004 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1c076709-6bae-4b64-9169-2aed68c813cd-apiservice-cert\") pod \"packageserver-d55dfcdfc-7wcwn\" (UID: \"1c076709-6bae-4b64-9169-2aed68c813cd\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7wcwn" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.927055 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/9d183ad2-4424-442f-a662-3572267b54fd-etcd-service-ca\") pod \"etcd-operator-b45778765-gzwfn\" (UID: \"9d183ad2-4424-442f-a662-3572267b54fd\") " pod="openshift-etcd-operator/etcd-operator-b45778765-gzwfn" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.927090 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/67a65cf0-6dcb-4730-a43b-0be90f5c8a93-encryption-config\") pod \"apiserver-7bbb656c7d-g7gbn\" (UID: \"67a65cf0-6dcb-4730-a43b-0be90f5c8a93\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-g7gbn" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.927396 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-registry-certificates\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.927715 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/9d183ad2-4424-442f-a662-3572267b54fd-etcd-service-ca\") pod \"etcd-operator-b45778765-gzwfn\" (UID: \"9d183ad2-4424-442f-a662-3572267b54fd\") " 
pod="openshift-etcd-operator/etcd-operator-b45778765-gzwfn" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.928119 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4b8139c8-66be-4f40-a084-aa26d58554bb-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-gdj2w\" (UID: \"4b8139c8-66be-4f40-a084-aa26d58554bb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gdj2w" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.928261 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bfe14e44-05d7-460b-b4aa-462d435e8c62-config\") pod \"kube-apiserver-operator-766d6c64bb-zwplq\" (UID: \"bfe14e44-05d7-460b-b4aa-462d435e8c62\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-zwplq" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.927175 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bfe14e44-05d7-460b-b4aa-462d435e8c62-config\") pod \"kube-apiserver-operator-766d6c64bb-zwplq\" (UID: \"bfe14e44-05d7-460b-b4aa-462d435e8c62\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-zwplq" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.928514 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/f741f928-61fd-41d5-b8c8-879a4744fa2e-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-z6z69\" (UID: \"f741f928-61fd-41d5-b8c8-879a4744fa2e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-z6z69" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.928642 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4l2xf\" (UniqueName: \"kubernetes.io/projected/4167d110-2211-4862-af3d-b6b4a88a0bfd-kube-api-access-4l2xf\") pod \"route-controller-manager-6576b87f9c-75d7z\" (UID: \"4167d110-2211-4862-af3d-b6b4a88a0bfd\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-75d7z" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.928709 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/601c0380-cc9d-4363-94be-92be6aeb94ac-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-4kqhw\" (UID: \"601c0380-cc9d-4363-94be-92be6aeb94ac\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4kqhw" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.928868 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fm5vh\" (UniqueName: \"kubernetes.io/projected/f741f928-61fd-41d5-b8c8-879a4744fa2e-kube-api-access-fm5vh\") pod \"machine-api-operator-5694c8668f-z6z69\" (UID: \"f741f928-61fd-41d5-b8c8-879a4744fa2e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-z6z69" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.928990 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tvd4w\" (UniqueName: \"kubernetes.io/projected/1c076709-6bae-4b64-9169-2aed68c813cd-kube-api-access-tvd4w\") pod \"packageserver-d55dfcdfc-7wcwn\" (UID: \"1c076709-6bae-4b64-9169-2aed68c813cd\") " 
pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7wcwn" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.929053 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-registry-tls\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.929285 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/d04e099e-931d-4fe3-9d85-196a1d40ebd8-default-certificate\") pod \"router-default-5444994796-hsngh\" (UID: \"d04e099e-931d-4fe3-9d85-196a1d40ebd8\") " pod="openshift-ingress/router-default-5444994796-hsngh" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.929455 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/9d183ad2-4424-442f-a662-3572267b54fd-etcd-client\") pod \"etcd-operator-b45778765-gzwfn\" (UID: \"9d183ad2-4424-442f-a662-3572267b54fd\") " pod="openshift-etcd-operator/etcd-operator-b45778765-gzwfn" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.929579 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gswgz\" (UniqueName: \"kubernetes.io/projected/56606974-5260-4587-b1cd-17e7ad12868b-kube-api-access-gswgz\") pod \"machine-config-controller-84d6567774-76rvr\" (UID: \"56606974-5260-4587-b1cd-17e7ad12868b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-76rvr" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.929460 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/dd5d82de-78d8-4e8e-93f3-fe6ede598dce-profile-collector-cert\") pod \"catalog-operator-68c6474976-s9t62\" (UID: \"dd5d82de-78d8-4e8e-93f3-fe6ede598dce\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s9t62" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.929647 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ssdwm\" (UniqueName: \"kubernetes.io/projected/57fc3016-4534-4be7-a281-b353f13830b9-kube-api-access-ssdwm\") pod \"service-ca-operator-777779d784-jbq9k\" (UID: \"57fc3016-4534-4be7-a281-b353f13830b9\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jbq9k" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.929673 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f9560d6-8ab5-46f8-bd69-7bca42610547-config\") pod \"kube-controller-manager-operator-78b949d7b-9rlms\" (UID: \"6f9560d6-8ab5-46f8-bd69-7bca42610547\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9rlms" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.929733 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1da8a1e4-422a-45dc-aa36-ae559ea1dc14-config\") pod \"machine-approver-56656f9798-hcwlb\" (UID: \"1da8a1e4-422a-45dc-aa36-ae559ea1dc14\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hcwlb" Nov 21 19:03:18 crc 
kubenswrapper[4701]: I1121 19:03:18.929786 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8b089186-0669-456a-bea2-60b001261161-serving-cert\") pod \"authentication-operator-69f744f599-rfbjh\" (UID: \"8b089186-0669-456a-bea2-60b001261161\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rfbjh" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.929922 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f741f928-61fd-41d5-b8c8-879a4744fa2e-config\") pod \"machine-api-operator-5694c8668f-z6z69\" (UID: \"f741f928-61fd-41d5-b8c8-879a4744fa2e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-z6z69" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.930064 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-wlgv5"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.930104 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6469e01b-cfc6-4ec9-87de-29c6eeee136f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-l7w9b\" (UID: \"6469e01b-cfc6-4ec9-87de-29c6eeee136f\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l7w9b" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.930439 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/d04e099e-931d-4fe3-9d85-196a1d40ebd8-stats-auth\") pod \"router-default-5444994796-hsngh\" (UID: \"d04e099e-931d-4fe3-9d85-196a1d40ebd8\") " pod="openshift-ingress/router-default-5444994796-hsngh" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.931063 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-wlgv5" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.931843 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/dd5d82de-78d8-4e8e-93f3-fe6ede598dce-srv-cert\") pod \"catalog-operator-68c6474976-s9t62\" (UID: \"dd5d82de-78d8-4e8e-93f3-fe6ede598dce\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s9t62" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.932066 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-registry-tls\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.932176 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/9d183ad2-4424-442f-a662-3572267b54fd-etcd-client\") pod \"etcd-operator-b45778765-gzwfn\" (UID: \"9d183ad2-4424-442f-a662-3572267b54fd\") " pod="openshift-etcd-operator/etcd-operator-b45778765-gzwfn" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.932394 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-installation-pull-secrets\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.932549 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-f2jgv"] Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.934656 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/cfa749f0-83ce-4ba6-8a3e-e43257bdb907-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-scrql\" (UID: \"cfa749f0-83ce-4ba6-8a3e-e43257bdb907\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-scrql" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.935790 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bfe14e44-05d7-460b-b4aa-462d435e8c62-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-zwplq\" (UID: \"bfe14e44-05d7-460b-b4aa-462d435e8c62\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-zwplq" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.939407 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.950006 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.950048 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.958675 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 21 19:03:18 crc kubenswrapper[4701]: I1121 19:03:18.990435 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ldn5n" podStartSLOduration=63.990416754 podStartE2EDuration="1m3.990416754s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:18.987717175 +0000 UTC m=+89.772857202" watchObservedRunningTime="2025-11-21 19:03:18.990416754 +0000 UTC m=+89.775556791" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.030865 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.031002 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/601c0380-cc9d-4363-94be-92be6aeb94ac-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-4kqhw\" (UID: \"601c0380-cc9d-4363-94be-92be6aeb94ac\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4kqhw" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.031031 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aaed9de5-8fa2-4493-ab19-2a79c17c6241-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-v5bsv\" (UID: \"aaed9de5-8fa2-4493-ab19-2a79c17c6241\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-v5bsv" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.031056 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mtm8m\" (UniqueName: \"kubernetes.io/projected/b3e75990-afff-41bb-a78e-3d04223bbb6c-kube-api-access-mtm8m\") pod \"marketplace-operator-79b997595-v6w6b\" (UID: \"b3e75990-afff-41bb-a78e-3d04223bbb6c\") " pod="openshift-marketplace/marketplace-operator-79b997595-v6w6b" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.031083 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4a518559-4b60-4e05-b0e1-7b2ef4b30817-config-volume\") pod \"dns-default-9d774\" (UID: \"4a518559-4b60-4e05-b0e1-7b2ef4b30817\") " pod="openshift-dns/dns-default-9d774" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.031108 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f7def574-9941-4933-83df-3f20df5797d4-service-ca\") pod \"console-f9d7485db-cwddx\" (UID: \"f7def574-9941-4933-83df-3f20df5797d4\") " pod="openshift-console/console-f9d7485db-cwddx" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.031148 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-pzz8d\" (UniqueName: \"kubernetes.io/projected/276e2cb3-e02e-4122-b10b-a454198b7954-kube-api-access-pzz8d\") pod \"collect-profiles-29395860-72qpb\" (UID: \"276e2cb3-e02e-4122-b10b-a454198b7954\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395860-72qpb" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.031169 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a2f5b911-dc9c-4009-a4b0-da201a34f156-audit-dir\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.031214 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/276e2cb3-e02e-4122-b10b-a454198b7954-config-volume\") pod \"collect-profiles-29395860-72qpb\" (UID: \"276e2cb3-e02e-4122-b10b-a454198b7954\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395860-72qpb" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.031248 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qb2v5\" (UniqueName: \"kubernetes.io/projected/7e4a0b68-874d-4da2-9286-12b84e37e090-kube-api-access-qb2v5\") pod \"package-server-manager-789f6589d5-twqqt\" (UID: \"7e4a0b68-874d-4da2-9286-12b84e37e090\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-twqqt" Nov 21 19:03:19 crc kubenswrapper[4701]: E1121 19:03:19.031316 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:19.531292217 +0000 UTC m=+90.316432334 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.031524 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rt2mw\" (UniqueName: \"kubernetes.io/projected/a2f5b911-dc9c-4009-a4b0-da201a34f156-kube-api-access-rt2mw\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.031710 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.031439 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a2f5b911-dc9c-4009-a4b0-da201a34f156-audit-dir\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.032509 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f7def574-9941-4933-83df-3f20df5797d4-service-ca\") pod \"console-f9d7485db-cwddx\" (UID: \"f7def574-9941-4933-83df-3f20df5797d4\") " pod="openshift-console/console-f9d7485db-cwddx" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.031850 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/7e4a0b68-874d-4da2-9286-12b84e37e090-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-twqqt\" (UID: \"7e4a0b68-874d-4da2-9286-12b84e37e090\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-twqqt" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.032723 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a2f5b911-dc9c-4009-a4b0-da201a34f156-audit-policies\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.032792 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4sw8h\" (UniqueName: \"kubernetes.io/projected/f7def574-9941-4933-83df-3f20df5797d4-kube-api-access-4sw8h\") pod \"console-f9d7485db-cwddx\" (UID: \"f7def574-9941-4933-83df-3f20df5797d4\") " pod="openshift-console/console-f9d7485db-cwddx" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.032834 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" 
(UniqueName: \"kubernetes.io/secret/4167d110-2211-4862-af3d-b6b4a88a0bfd-serving-cert\") pod \"route-controller-manager-6576b87f9c-75d7z\" (UID: \"4167d110-2211-4862-af3d-b6b4a88a0bfd\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-75d7z" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.032885 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.032958 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/b6dceb79-b806-4504-bc30-70497679c75d-mountpoint-dir\") pod \"csi-hostpathplugin-f2jgv\" (UID: \"b6dceb79-b806-4504-bc30-70497679c75d\") " pod="hostpath-provisioner/csi-hostpathplugin-f2jgv" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.033002 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b3e75990-afff-41bb-a78e-3d04223bbb6c-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-v6w6b\" (UID: \"b3e75990-afff-41bb-a78e-3d04223bbb6c\") " pod="openshift-marketplace/marketplace-operator-79b997595-v6w6b" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.033036 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f7def574-9941-4933-83df-3f20df5797d4-oauth-serving-cert\") pod \"console-f9d7485db-cwddx\" (UID: \"f7def574-9941-4933-83df-3f20df5797d4\") " pod="openshift-console/console-f9d7485db-cwddx" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.033099 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w9tk4\" (UniqueName: \"kubernetes.io/projected/3dc5a393-ee87-4a1c-b786-9523a05de343-kube-api-access-w9tk4\") pod \"dns-operator-744455d44c-2b8wr\" (UID: \"3dc5a393-ee87-4a1c-b786-9523a05de343\") " pod="openshift-dns-operator/dns-operator-744455d44c-2b8wr" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.033109 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/276e2cb3-e02e-4122-b10b-a454198b7954-config-volume\") pod \"collect-profiles-29395860-72qpb\" (UID: \"276e2cb3-e02e-4122-b10b-a454198b7954\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395860-72qpb" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.033145 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.033184 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f7def574-9941-4933-83df-3f20df5797d4-console-serving-cert\") pod \"console-f9d7485db-cwddx\" (UID: 
\"f7def574-9941-4933-83df-3f20df5797d4\") " pod="openshift-console/console-f9d7485db-cwddx" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.033271 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.033309 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.033344 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v4m2t\" (UniqueName: \"kubernetes.io/projected/b6dceb79-b806-4504-bc30-70497679c75d-kube-api-access-v4m2t\") pod \"csi-hostpathplugin-f2jgv\" (UID: \"b6dceb79-b806-4504-bc30-70497679c75d\") " pod="hostpath-provisioner/csi-hostpathplugin-f2jgv" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.033386 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/9c46cea4-80aa-4a56-9370-0b7f5331c1ee-auth-proxy-config\") pod \"machine-config-operator-74547568cd-w4fcr\" (UID: \"9c46cea4-80aa-4a56-9370-0b7f5331c1ee\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-w4fcr" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.033439 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/67a65cf0-6dcb-4730-a43b-0be90f5c8a93-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-g7gbn\" (UID: \"67a65cf0-6dcb-4730-a43b-0be90f5c8a93\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-g7gbn" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.033478 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wtj8l\" (UniqueName: \"kubernetes.io/projected/9c46cea4-80aa-4a56-9370-0b7f5331c1ee-kube-api-access-wtj8l\") pod \"machine-config-operator-74547568cd-w4fcr\" (UID: \"9c46cea4-80aa-4a56-9370-0b7f5331c1ee\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-w4fcr" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.033502 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.033533 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/67a65cf0-6dcb-4730-a43b-0be90f5c8a93-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-g7gbn\" (UID: \"67a65cf0-6dcb-4730-a43b-0be90f5c8a93\") " 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-g7gbn" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.033550 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4f9fb2aa-8dfa-47d7-8e35-3b6267d85ef7-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-nzzw5\" (UID: \"4f9fb2aa-8dfa-47d7-8e35-3b6267d85ef7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nzzw5" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.033567 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/f7def574-9941-4933-83df-3f20df5797d4-console-config\") pod \"console-f9d7485db-cwddx\" (UID: \"f7def574-9941-4933-83df-3f20df5797d4\") " pod="openshift-console/console-f9d7485db-cwddx" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.033591 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/1c076709-6bae-4b64-9169-2aed68c813cd-tmpfs\") pod \"packageserver-d55dfcdfc-7wcwn\" (UID: \"1c076709-6bae-4b64-9169-2aed68c813cd\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7wcwn" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.033610 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/1da8a1e4-422a-45dc-aa36-ae559ea1dc14-machine-approver-tls\") pod \"machine-approver-56656f9798-hcwlb\" (UID: \"1da8a1e4-422a-45dc-aa36-ae559ea1dc14\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hcwlb" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.033631 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gk6v9\" (UniqueName: \"kubernetes.io/projected/5d7aff3f-cf94-411b-b23b-c91f58cdc2f6-kube-api-access-gk6v9\") pod \"downloads-7954f5f757-hbp5b\" (UID: \"5d7aff3f-cf94-411b-b23b-c91f58cdc2f6\") " pod="openshift-console/downloads-7954f5f757-hbp5b" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.033649 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8b089186-0669-456a-bea2-60b001261161-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-rfbjh\" (UID: \"8b089186-0669-456a-bea2-60b001261161\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rfbjh" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.033670 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/b6dceb79-b806-4504-bc30-70497679c75d-socket-dir\") pod \"csi-hostpathplugin-f2jgv\" (UID: \"b6dceb79-b806-4504-bc30-70497679c75d\") " pod="hostpath-provisioner/csi-hostpathplugin-f2jgv" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.033692 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/67a65cf0-6dcb-4730-a43b-0be90f5c8a93-audit-dir\") pod \"apiserver-7bbb656c7d-g7gbn\" (UID: \"67a65cf0-6dcb-4730-a43b-0be90f5c8a93\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-g7gbn" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.033708 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: 
\"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.033725 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/b6dceb79-b806-4504-bc30-70497679c75d-registration-dir\") pod \"csi-hostpathplugin-f2jgv\" (UID: \"b6dceb79-b806-4504-bc30-70497679c75d\") " pod="hostpath-provisioner/csi-hostpathplugin-f2jgv" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.033748 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4167d110-2211-4862-af3d-b6b4a88a0bfd-config\") pod \"route-controller-manager-6576b87f9c-75d7z\" (UID: \"4167d110-2211-4862-af3d-b6b4a88a0bfd\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-75d7z" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.033768 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4fb5ab32-dc0b-40c0-8af0-7cffba411a22-cert\") pod \"ingress-canary-2hzx9\" (UID: \"4fb5ab32-dc0b-40c0-8af0-7cffba411a22\") " pod="openshift-ingress-canary/ingress-canary-2hzx9" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.033787 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/bbb8093d-f6ea-471b-a855-f7c6d5412f7b-node-bootstrap-token\") pod \"machine-config-server-wlgv5\" (UID: \"bbb8093d-f6ea-471b-a855-f7c6d5412f7b\") " pod="openshift-machine-config-operator/machine-config-server-wlgv5" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.033809 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8jhcz\" (UniqueName: \"kubernetes.io/projected/2ea15e3b-bc7e-491a-8cd7-4275d33abf23-kube-api-access-8jhcz\") pod \"service-ca-9c57cc56f-zmch7\" (UID: \"2ea15e3b-bc7e-491a-8cd7-4275d33abf23\") " pod="openshift-service-ca/service-ca-9c57cc56f-zmch7" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.033830 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/4a518559-4b60-4e05-b0e1-7b2ef4b30817-metrics-tls\") pod \"dns-default-9d774\" (UID: \"4a518559-4b60-4e05-b0e1-7b2ef4b30817\") " pod="openshift-dns/dns-default-9d774" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.033849 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rffw2\" (UniqueName: \"kubernetes.io/projected/bbb8093d-f6ea-471b-a855-f7c6d5412f7b-kube-api-access-rffw2\") pod \"machine-config-server-wlgv5\" (UID: \"bbb8093d-f6ea-471b-a855-f7c6d5412f7b\") " pod="openshift-machine-config-operator/machine-config-server-wlgv5" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.033878 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.033897 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/2ea15e3b-bc7e-491a-8cd7-4275d33abf23-signing-cabundle\") pod \"service-ca-9c57cc56f-zmch7\" (UID: \"2ea15e3b-bc7e-491a-8cd7-4275d33abf23\") " pod="openshift-service-ca/service-ca-9c57cc56f-zmch7" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.033946 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/1c076709-6bae-4b64-9169-2aed68c813cd-tmpfs\") pod \"packageserver-d55dfcdfc-7wcwn\" (UID: \"1c076709-6bae-4b64-9169-2aed68c813cd\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7wcwn" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.033973 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6klzr\" (UniqueName: \"kubernetes.io/projected/4fb5ab32-dc0b-40c0-8af0-7cffba411a22-kube-api-access-6klzr\") pod \"ingress-canary-2hzx9\" (UID: \"4fb5ab32-dc0b-40c0-8af0-7cffba411a22\") " pod="openshift-ingress-canary/ingress-canary-2hzx9" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.034002 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/67a65cf0-6dcb-4730-a43b-0be90f5c8a93-audit-policies\") pod \"apiserver-7bbb656c7d-g7gbn\" (UID: \"67a65cf0-6dcb-4730-a43b-0be90f5c8a93\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-g7gbn" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.034027 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zq9bv\" (UniqueName: \"kubernetes.io/projected/67a65cf0-6dcb-4730-a43b-0be90f5c8a93-kube-api-access-zq9bv\") pod \"apiserver-7bbb656c7d-g7gbn\" (UID: \"67a65cf0-6dcb-4730-a43b-0be90f5c8a93\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-g7gbn" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.034048 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8b089186-0669-456a-bea2-60b001261161-service-ca-bundle\") pod \"authentication-operator-69f744f599-rfbjh\" (UID: \"8b089186-0669-456a-bea2-60b001261161\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rfbjh" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.034070 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/9c46cea4-80aa-4a56-9370-0b7f5331c1ee-images\") pod \"machine-config-operator-74547568cd-w4fcr\" (UID: \"9c46cea4-80aa-4a56-9370-0b7f5331c1ee\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-w4fcr" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.034089 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/56606974-5260-4587-b1cd-17e7ad12868b-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-76rvr\" (UID: \"56606974-5260-4587-b1cd-17e7ad12868b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-76rvr" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.034107 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"metrics-tls\" (UniqueName: \"kubernetes.io/secret/3dc5a393-ee87-4a1c-b786-9523a05de343-metrics-tls\") pod \"dns-operator-744455d44c-2b8wr\" (UID: \"3dc5a393-ee87-4a1c-b786-9523a05de343\") " pod="openshift-dns-operator/dns-operator-744455d44c-2b8wr" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.034125 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/276e2cb3-e02e-4122-b10b-a454198b7954-secret-volume\") pod \"collect-profiles-29395860-72qpb\" (UID: \"276e2cb3-e02e-4122-b10b-a454198b7954\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395860-72qpb" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.034126 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/67a65cf0-6dcb-4730-a43b-0be90f5c8a93-audit-dir\") pod \"apiserver-7bbb656c7d-g7gbn\" (UID: \"67a65cf0-6dcb-4730-a43b-0be90f5c8a93\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-g7gbn" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.034143 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b8kw2\" (UniqueName: \"kubernetes.io/projected/8b089186-0669-456a-bea2-60b001261161-kube-api-access-b8kw2\") pod \"authentication-operator-69f744f599-rfbjh\" (UID: \"8b089186-0669-456a-bea2-60b001261161\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rfbjh" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.034170 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6f9560d6-8ab5-46f8-bd69-7bca42610547-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-9rlms\" (UID: \"6f9560d6-8ab5-46f8-bd69-7bca42610547\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9rlms" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.034277 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/57fc3016-4534-4be7-a281-b353f13830b9-serving-cert\") pod \"service-ca-operator-777779d784-jbq9k\" (UID: \"57fc3016-4534-4be7-a281-b353f13830b9\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jbq9k" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.034365 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hzkk6\" (UniqueName: \"kubernetes.io/projected/4f9fb2aa-8dfa-47d7-8e35-3b6267d85ef7-kube-api-access-hzkk6\") pod \"openshift-apiserver-operator-796bbdcf4f-nzzw5\" (UID: \"4f9fb2aa-8dfa-47d7-8e35-3b6267d85ef7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nzzw5" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.034724 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/9c46cea4-80aa-4a56-9370-0b7f5331c1ee-auth-proxy-config\") pod \"machine-config-operator-74547568cd-w4fcr\" (UID: \"9c46cea4-80aa-4a56-9370-0b7f5331c1ee\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-w4fcr" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.034898 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1c076709-6bae-4b64-9169-2aed68c813cd-webhook-cert\") pod 
\"packageserver-d55dfcdfc-7wcwn\" (UID: \"1c076709-6bae-4b64-9169-2aed68c813cd\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7wcwn" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.034934 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l9dz7\" (UniqueName: \"kubernetes.io/projected/1da8a1e4-422a-45dc-aa36-ae559ea1dc14-kube-api-access-l9dz7\") pod \"machine-approver-56656f9798-hcwlb\" (UID: \"1da8a1e4-422a-45dc-aa36-ae559ea1dc14\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hcwlb" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.035072 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/2ea15e3b-bc7e-491a-8cd7-4275d33abf23-signing-cabundle\") pod \"service-ca-9c57cc56f-zmch7\" (UID: \"2ea15e3b-bc7e-491a-8cd7-4275d33abf23\") " pod="openshift-service-ca/service-ca-9c57cc56f-zmch7" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.035089 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/56606974-5260-4587-b1cd-17e7ad12868b-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-76rvr\" (UID: \"56606974-5260-4587-b1cd-17e7ad12868b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-76rvr" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.035235 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/b6dceb79-b806-4504-bc30-70497679c75d-csi-data-dir\") pod \"csi-hostpathplugin-f2jgv\" (UID: \"b6dceb79-b806-4504-bc30-70497679c75d\") " pod="hostpath-provisioner/csi-hostpathplugin-f2jgv" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.035282 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/b6dceb79-b806-4504-bc30-70497679c75d-plugins-dir\") pod \"csi-hostpathplugin-f2jgv\" (UID: \"b6dceb79-b806-4504-bc30-70497679c75d\") " pod="hostpath-provisioner/csi-hostpathplugin-f2jgv" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.035338 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/67a65cf0-6dcb-4730-a43b-0be90f5c8a93-etcd-client\") pod \"apiserver-7bbb656c7d-g7gbn\" (UID: \"67a65cf0-6dcb-4730-a43b-0be90f5c8a93\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-g7gbn" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.035370 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/9c46cea4-80aa-4a56-9370-0b7f5331c1ee-proxy-tls\") pod \"machine-config-operator-74547568cd-w4fcr\" (UID: \"9c46cea4-80aa-4a56-9370-0b7f5331c1ee\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-w4fcr" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.035395 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f7def574-9941-4933-83df-3f20df5797d4-console-oauth-config\") pod \"console-f9d7485db-cwddx\" (UID: \"f7def574-9941-4933-83df-3f20df5797d4\") " pod="openshift-console/console-f9d7485db-cwddx" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.035544 4701 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.035573 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8b089186-0669-456a-bea2-60b001261161-config\") pod \"authentication-operator-69f744f599-rfbjh\" (UID: \"8b089186-0669-456a-bea2-60b001261161\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rfbjh" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.035606 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/2ea15e3b-bc7e-491a-8cd7-4275d33abf23-signing-key\") pod \"service-ca-9c57cc56f-zmch7\" (UID: \"2ea15e3b-bc7e-491a-8cd7-4275d33abf23\") " pod="openshift-service-ca/service-ca-9c57cc56f-zmch7" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.035628 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-24j9g\" (UniqueName: \"kubernetes.io/projected/a59af599-0d79-4301-8c37-e0e7189477ad-kube-api-access-24j9g\") pod \"cluster-samples-operator-665b6dd947-7fs2f\" (UID: \"a59af599-0d79-4301-8c37-e0e7189477ad\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7fs2f" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.035645 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/601c0380-cc9d-4363-94be-92be6aeb94ac-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-4kqhw\" (UID: \"601c0380-cc9d-4363-94be-92be6aeb94ac\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4kqhw" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.035660 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a59af599-0d79-4301-8c37-e0e7189477ad-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-7fs2f\" (UID: \"a59af599-0d79-4301-8c37-e0e7189477ad\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7fs2f" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.035684 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6f9560d6-8ab5-46f8-bd69-7bca42610547-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-9rlms\" (UID: \"6f9560d6-8ab5-46f8-bd69-7bca42610547\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9rlms" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.035703 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kjvng\" (UniqueName: \"kubernetes.io/projected/4c931253-2864-49ef-a35a-0e7c04e2d75e-kube-api-access-kjvng\") pod \"migrator-59844c95c7-zt9ht\" (UID: \"4c931253-2864-49ef-a35a-0e7c04e2d75e\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-zt9ht" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.035717 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" 
(UniqueName: \"kubernetes.io/secret/92358a71-dd66-49c9-8cc2-83cf555207d4-srv-cert\") pod \"olm-operator-6b444d44fb-mqbkq\" (UID: \"92358a71-dd66-49c9-8cc2-83cf555207d4\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mqbkq" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.035741 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l7bpg\" (UniqueName: \"kubernetes.io/projected/92358a71-dd66-49c9-8cc2-83cf555207d4-kube-api-access-l7bpg\") pod \"olm-operator-6b444d44fb-mqbkq\" (UID: \"92358a71-dd66-49c9-8cc2-83cf555207d4\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mqbkq" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.035761 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aaed9de5-8fa2-4493-ab19-2a79c17c6241-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-v5bsv\" (UID: \"aaed9de5-8fa2-4493-ab19-2a79c17c6241\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-v5bsv" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.035776 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.035791 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/92358a71-dd66-49c9-8cc2-83cf555207d4-profile-collector-cert\") pod \"olm-operator-6b444d44fb-mqbkq\" (UID: \"92358a71-dd66-49c9-8cc2-83cf555207d4\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mqbkq" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.035807 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.035822 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1c076709-6bae-4b64-9169-2aed68c813cd-apiservice-cert\") pod \"packageserver-d55dfcdfc-7wcwn\" (UID: \"1c076709-6bae-4b64-9169-2aed68c813cd\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7wcwn" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.035839 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7pbvv\" (UniqueName: \"kubernetes.io/projected/aaed9de5-8fa2-4493-ab19-2a79c17c6241-kube-api-access-7pbvv\") pod \"openshift-controller-manager-operator-756b6f6bc6-v5bsv\" (UID: \"aaed9de5-8fa2-4493-ab19-2a79c17c6241\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-v5bsv" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.035856 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: 
\"kubernetes.io/secret/67a65cf0-6dcb-4730-a43b-0be90f5c8a93-encryption-config\") pod \"apiserver-7bbb656c7d-g7gbn\" (UID: \"67a65cf0-6dcb-4730-a43b-0be90f5c8a93\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-g7gbn" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.035876 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/f741f928-61fd-41d5-b8c8-879a4744fa2e-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-z6z69\" (UID: \"f741f928-61fd-41d5-b8c8-879a4744fa2e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-z6z69" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.035910 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4l2xf\" (UniqueName: \"kubernetes.io/projected/4167d110-2211-4862-af3d-b6b4a88a0bfd-kube-api-access-4l2xf\") pod \"route-controller-manager-6576b87f9c-75d7z\" (UID: \"4167d110-2211-4862-af3d-b6b4a88a0bfd\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-75d7z" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.035926 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/601c0380-cc9d-4363-94be-92be6aeb94ac-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-4kqhw\" (UID: \"601c0380-cc9d-4363-94be-92be6aeb94ac\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4kqhw" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.035942 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fm5vh\" (UniqueName: \"kubernetes.io/projected/f741f928-61fd-41d5-b8c8-879a4744fa2e-kube-api-access-fm5vh\") pod \"machine-api-operator-5694c8668f-z6z69\" (UID: \"f741f928-61fd-41d5-b8c8-879a4744fa2e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-z6z69" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.035960 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tvd4w\" (UniqueName: \"kubernetes.io/projected/1c076709-6bae-4b64-9169-2aed68c813cd-kube-api-access-tvd4w\") pod \"packageserver-d55dfcdfc-7wcwn\" (UID: \"1c076709-6bae-4b64-9169-2aed68c813cd\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7wcwn" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.035978 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gswgz\" (UniqueName: \"kubernetes.io/projected/56606974-5260-4587-b1cd-17e7ad12868b-kube-api-access-gswgz\") pod \"machine-config-controller-84d6567774-76rvr\" (UID: \"56606974-5260-4587-b1cd-17e7ad12868b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-76rvr" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.036003 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f9560d6-8ab5-46f8-bd69-7bca42610547-config\") pod \"kube-controller-manager-operator-78b949d7b-9rlms\" (UID: \"6f9560d6-8ab5-46f8-bd69-7bca42610547\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9rlms" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.036017 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/1da8a1e4-422a-45dc-aa36-ae559ea1dc14-config\") pod \"machine-approver-56656f9798-hcwlb\" (UID: \"1da8a1e4-422a-45dc-aa36-ae559ea1dc14\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hcwlb" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.036035 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8b089186-0669-456a-bea2-60b001261161-serving-cert\") pod \"authentication-operator-69f744f599-rfbjh\" (UID: \"8b089186-0669-456a-bea2-60b001261161\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rfbjh" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.036050 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f741f928-61fd-41d5-b8c8-879a4744fa2e-config\") pod \"machine-api-operator-5694c8668f-z6z69\" (UID: \"f741f928-61fd-41d5-b8c8-879a4744fa2e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-z6z69" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.036066 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ssdwm\" (UniqueName: \"kubernetes.io/projected/57fc3016-4534-4be7-a281-b353f13830b9-kube-api-access-ssdwm\") pod \"service-ca-operator-777779d784-jbq9k\" (UID: \"57fc3016-4534-4be7-a281-b353f13830b9\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jbq9k" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.036081 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/f741f928-61fd-41d5-b8c8-879a4744fa2e-images\") pod \"machine-api-operator-5694c8668f-z6z69\" (UID: \"f741f928-61fd-41d5-b8c8-879a4744fa2e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-z6z69" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.036102 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1da8a1e4-422a-45dc-aa36-ae559ea1dc14-auth-proxy-config\") pod \"machine-approver-56656f9798-hcwlb\" (UID: \"1da8a1e4-422a-45dc-aa36-ae559ea1dc14\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hcwlb" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.036116 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4167d110-2211-4862-af3d-b6b4a88a0bfd-client-ca\") pod \"route-controller-manager-6576b87f9c-75d7z\" (UID: \"4167d110-2211-4862-af3d-b6b4a88a0bfd\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-75d7z" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.036135 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b3e75990-afff-41bb-a78e-3d04223bbb6c-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-v6w6b\" (UID: \"b3e75990-afff-41bb-a78e-3d04223bbb6c\") " pod="openshift-marketplace/marketplace-operator-79b997595-v6w6b" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.036151 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/67a65cf0-6dcb-4730-a43b-0be90f5c8a93-serving-cert\") pod \"apiserver-7bbb656c7d-g7gbn\" (UID: \"67a65cf0-6dcb-4730-a43b-0be90f5c8a93\") " 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-g7gbn" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.036167 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f9fb2aa-8dfa-47d7-8e35-3b6267d85ef7-config\") pod \"openshift-apiserver-operator-796bbdcf4f-nzzw5\" (UID: \"4f9fb2aa-8dfa-47d7-8e35-3b6267d85ef7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nzzw5" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.036189 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/56606974-5260-4587-b1cd-17e7ad12868b-proxy-tls\") pod \"machine-config-controller-84d6567774-76rvr\" (UID: \"56606974-5260-4587-b1cd-17e7ad12868b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-76rvr" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.036219 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/bbb8093d-f6ea-471b-a855-f7c6d5412f7b-certs\") pod \"machine-config-server-wlgv5\" (UID: \"bbb8093d-f6ea-471b-a855-f7c6d5412f7b\") " pod="openshift-machine-config-operator/machine-config-server-wlgv5" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.036239 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/57fc3016-4534-4be7-a281-b353f13830b9-config\") pod \"service-ca-operator-777779d784-jbq9k\" (UID: \"57fc3016-4534-4be7-a281-b353f13830b9\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jbq9k" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.036254 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.036272 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f7def574-9941-4933-83df-3f20df5797d4-trusted-ca-bundle\") pod \"console-f9d7485db-cwddx\" (UID: \"f7def574-9941-4933-83df-3f20df5797d4\") " pod="openshift-console/console-f9d7485db-cwddx" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.036289 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rb5wl\" (UniqueName: \"kubernetes.io/projected/4a518559-4b60-4e05-b0e1-7b2ef4b30817-kube-api-access-rb5wl\") pod \"dns-default-9d774\" (UID: \"4a518559-4b60-4e05-b0e1-7b2ef4b30817\") " pod="openshift-dns/dns-default-9d774" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.036654 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/f7def574-9941-4933-83df-3f20df5797d4-console-config\") pod \"console-f9d7485db-cwddx\" (UID: \"f7def574-9941-4933-83df-3f20df5797d4\") " pod="openshift-console/console-f9d7485db-cwddx" Nov 21 19:03:19 crc kubenswrapper[4701]: E1121 19:03:19.037767 4701 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:19.537740337 +0000 UTC m=+90.322880454 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.039019 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b3e75990-afff-41bb-a78e-3d04223bbb6c-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-v6w6b\" (UID: \"b3e75990-afff-41bb-a78e-3d04223bbb6c\") " pod="openshift-marketplace/marketplace-operator-79b997595-v6w6b" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.039308 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1c076709-6bae-4b64-9169-2aed68c813cd-webhook-cert\") pod \"packageserver-d55dfcdfc-7wcwn\" (UID: \"1c076709-6bae-4b64-9169-2aed68c813cd\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7wcwn" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.039559 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/276e2cb3-e02e-4122-b10b-a454198b7954-secret-volume\") pod \"collect-profiles-29395860-72qpb\" (UID: \"276e2cb3-e02e-4122-b10b-a454198b7954\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395860-72qpb" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.039880 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/57fc3016-4534-4be7-a281-b353f13830b9-serving-cert\") pod \"service-ca-operator-777779d784-jbq9k\" (UID: \"57fc3016-4534-4be7-a281-b353f13830b9\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jbq9k" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.040028 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f7def574-9941-4933-83df-3f20df5797d4-console-oauth-config\") pod \"console-f9d7485db-cwddx\" (UID: \"f7def574-9941-4933-83df-3f20df5797d4\") " pod="openshift-console/console-f9d7485db-cwddx" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.040061 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f7def574-9941-4933-83df-3f20df5797d4-oauth-serving-cert\") pod \"console-f9d7485db-cwddx\" (UID: \"f7def574-9941-4933-83df-3f20df5797d4\") " pod="openshift-console/console-f9d7485db-cwddx" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.040093 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/57fc3016-4534-4be7-a281-b353f13830b9-config\") pod \"service-ca-operator-777779d784-jbq9k\" (UID: \"57fc3016-4534-4be7-a281-b353f13830b9\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jbq9k" Nov 21 19:03:19 crc 
kubenswrapper[4701]: I1121 19:03:19.041302 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/7e4a0b68-874d-4da2-9286-12b84e37e090-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-twqqt\" (UID: \"7e4a0b68-874d-4da2-9286-12b84e37e090\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-twqqt" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.041348 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f7def574-9941-4933-83df-3f20df5797d4-trusted-ca-bundle\") pod \"console-f9d7485db-cwddx\" (UID: \"f7def574-9941-4933-83df-3f20df5797d4\") " pod="openshift-console/console-f9d7485db-cwddx" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.042952 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f7def574-9941-4933-83df-3f20df5797d4-console-serving-cert\") pod \"console-f9d7485db-cwddx\" (UID: \"f7def574-9941-4933-83df-3f20df5797d4\") " pod="openshift-console/console-f9d7485db-cwddx" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.044135 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/2ea15e3b-bc7e-491a-8cd7-4275d33abf23-signing-key\") pod \"service-ca-9c57cc56f-zmch7\" (UID: \"2ea15e3b-bc7e-491a-8cd7-4275d33abf23\") " pod="openshift-service-ca/service-ca-9c57cc56f-zmch7" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.044231 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a59af599-0d79-4301-8c37-e0e7189477ad-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-7fs2f\" (UID: \"a59af599-0d79-4301-8c37-e0e7189477ad\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7fs2f" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.044259 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/56606974-5260-4587-b1cd-17e7ad12868b-proxy-tls\") pod \"machine-config-controller-84d6567774-76rvr\" (UID: \"56606974-5260-4587-b1cd-17e7ad12868b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-76rvr" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.044780 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/92358a71-dd66-49c9-8cc2-83cf555207d4-profile-collector-cert\") pod \"olm-operator-6b444d44fb-mqbkq\" (UID: \"92358a71-dd66-49c9-8cc2-83cf555207d4\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mqbkq" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.044973 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1c076709-6bae-4b64-9169-2aed68c813cd-apiservice-cert\") pod \"packageserver-d55dfcdfc-7wcwn\" (UID: \"1c076709-6bae-4b64-9169-2aed68c813cd\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7wcwn" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.046159 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/92358a71-dd66-49c9-8cc2-83cf555207d4-srv-cert\") pod 
\"olm-operator-6b444d44fb-mqbkq\" (UID: \"92358a71-dd66-49c9-8cc2-83cf555207d4\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mqbkq" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.046860 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b3e75990-afff-41bb-a78e-3d04223bbb6c-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-v6w6b\" (UID: \"b3e75990-afff-41bb-a78e-3d04223bbb6c\") " pod="openshift-marketplace/marketplace-operator-79b997595-v6w6b" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.051068 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.051161 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.051518 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.078180 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.080174 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.088332 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/67a65cf0-6dcb-4730-a43b-0be90f5c8a93-encryption-config\") pod \"apiserver-7bbb656c7d-g7gbn\" (UID: \"67a65cf0-6dcb-4730-a43b-0be90f5c8a93\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-g7gbn" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.091192 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/67a65cf0-6dcb-4730-a43b-0be90f5c8a93-etcd-client\") pod \"apiserver-7bbb656c7d-g7gbn\" (UID: \"67a65cf0-6dcb-4730-a43b-0be90f5c8a93\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-g7gbn" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.094827 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/67a65cf0-6dcb-4730-a43b-0be90f5c8a93-serving-cert\") pod \"apiserver-7bbb656c7d-g7gbn\" (UID: \"67a65cf0-6dcb-4730-a43b-0be90f5c8a93\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-g7gbn" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.098660 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.105057 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/67a65cf0-6dcb-4730-a43b-0be90f5c8a93-audit-policies\") pod \"apiserver-7bbb656c7d-g7gbn\" (UID: \"67a65cf0-6dcb-4730-a43b-0be90f5c8a93\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-g7gbn" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.118246 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.125499 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/67a65cf0-6dcb-4730-a43b-0be90f5c8a93-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-g7gbn\" (UID: \"67a65cf0-6dcb-4730-a43b-0be90f5c8a93\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-g7gbn" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.137046 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:19 crc kubenswrapper[4701]: E1121 19:03:19.137161 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:19.637142068 +0000 UTC m=+90.422282095 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.137272 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6klzr\" (UniqueName: \"kubernetes.io/projected/4fb5ab32-dc0b-40c0-8af0-7cffba411a22-kube-api-access-6klzr\") pod \"ingress-canary-2hzx9\" (UID: \"4fb5ab32-dc0b-40c0-8af0-7cffba411a22\") " pod="openshift-ingress-canary/ingress-canary-2hzx9" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.137431 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/b6dceb79-b806-4504-bc30-70497679c75d-csi-data-dir\") pod \"csi-hostpathplugin-f2jgv\" (UID: \"b6dceb79-b806-4504-bc30-70497679c75d\") " pod="hostpath-provisioner/csi-hostpathplugin-f2jgv" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.137464 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/b6dceb79-b806-4504-bc30-70497679c75d-plugins-dir\") pod \"csi-hostpathplugin-f2jgv\" (UID: \"b6dceb79-b806-4504-bc30-70497679c75d\") " pod="hostpath-provisioner/csi-hostpathplugin-f2jgv" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.137529 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.137550 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/b6dceb79-b806-4504-bc30-70497679c75d-csi-data-dir\") pod \"csi-hostpathplugin-f2jgv\" (UID: \"b6dceb79-b806-4504-bc30-70497679c75d\") " pod="hostpath-provisioner/csi-hostpathplugin-f2jgv" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 
19:03:19.137730 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/b6dceb79-b806-4504-bc30-70497679c75d-plugins-dir\") pod \"csi-hostpathplugin-f2jgv\" (UID: \"b6dceb79-b806-4504-bc30-70497679c75d\") " pod="hostpath-provisioner/csi-hostpathplugin-f2jgv" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.137960 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/bbb8093d-f6ea-471b-a855-f7c6d5412f7b-certs\") pod \"machine-config-server-wlgv5\" (UID: \"bbb8093d-f6ea-471b-a855-f7c6d5412f7b\") " pod="openshift-machine-config-operator/machine-config-server-wlgv5" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.138023 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rb5wl\" (UniqueName: \"kubernetes.io/projected/4a518559-4b60-4e05-b0e1-7b2ef4b30817-kube-api-access-rb5wl\") pod \"dns-default-9d774\" (UID: \"4a518559-4b60-4e05-b0e1-7b2ef4b30817\") " pod="openshift-dns/dns-default-9d774" Nov 21 19:03:19 crc kubenswrapper[4701]: E1121 19:03:19.138027 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:19.638018957 +0000 UTC m=+90.423158984 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.138100 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4a518559-4b60-4e05-b0e1-7b2ef4b30817-config-volume\") pod \"dns-default-9d774\" (UID: \"4a518559-4b60-4e05-b0e1-7b2ef4b30817\") " pod="openshift-dns/dns-default-9d774" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.138312 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/b6dceb79-b806-4504-bc30-70497679c75d-mountpoint-dir\") pod \"csi-hostpathplugin-f2jgv\" (UID: \"b6dceb79-b806-4504-bc30-70497679c75d\") " pod="hostpath-provisioner/csi-hostpathplugin-f2jgv" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.138415 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v4m2t\" (UniqueName: \"kubernetes.io/projected/b6dceb79-b806-4504-bc30-70497679c75d-kube-api-access-v4m2t\") pod \"csi-hostpathplugin-f2jgv\" (UID: \"b6dceb79-b806-4504-bc30-70497679c75d\") " pod="hostpath-provisioner/csi-hostpathplugin-f2jgv" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.138511 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/b6dceb79-b806-4504-bc30-70497679c75d-mountpoint-dir\") pod \"csi-hostpathplugin-f2jgv\" (UID: \"b6dceb79-b806-4504-bc30-70497679c75d\") " pod="hostpath-provisioner/csi-hostpathplugin-f2jgv" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.138540 4701 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/b6dceb79-b806-4504-bc30-70497679c75d-socket-dir\") pod \"csi-hostpathplugin-f2jgv\" (UID: \"b6dceb79-b806-4504-bc30-70497679c75d\") " pod="hostpath-provisioner/csi-hostpathplugin-f2jgv" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.138583 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/b6dceb79-b806-4504-bc30-70497679c75d-registration-dir\") pod \"csi-hostpathplugin-f2jgv\" (UID: \"b6dceb79-b806-4504-bc30-70497679c75d\") " pod="hostpath-provisioner/csi-hostpathplugin-f2jgv" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.138627 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4fb5ab32-dc0b-40c0-8af0-7cffba411a22-cert\") pod \"ingress-canary-2hzx9\" (UID: \"4fb5ab32-dc0b-40c0-8af0-7cffba411a22\") " pod="openshift-ingress-canary/ingress-canary-2hzx9" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.138658 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/bbb8093d-f6ea-471b-a855-f7c6d5412f7b-node-bootstrap-token\") pod \"machine-config-server-wlgv5\" (UID: \"bbb8093d-f6ea-471b-a855-f7c6d5412f7b\") " pod="openshift-machine-config-operator/machine-config-server-wlgv5" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.138667 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/b6dceb79-b806-4504-bc30-70497679c75d-registration-dir\") pod \"csi-hostpathplugin-f2jgv\" (UID: \"b6dceb79-b806-4504-bc30-70497679c75d\") " pod="hostpath-provisioner/csi-hostpathplugin-f2jgv" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.138701 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/4a518559-4b60-4e05-b0e1-7b2ef4b30817-metrics-tls\") pod \"dns-default-9d774\" (UID: \"4a518559-4b60-4e05-b0e1-7b2ef4b30817\") " pod="openshift-dns/dns-default-9d774" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.138591 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.138927 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rffw2\" (UniqueName: \"kubernetes.io/projected/bbb8093d-f6ea-471b-a855-f7c6d5412f7b-kube-api-access-rffw2\") pod \"machine-config-server-wlgv5\" (UID: \"bbb8093d-f6ea-471b-a855-f7c6d5412f7b\") " pod="openshift-machine-config-operator/machine-config-server-wlgv5" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.138706 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/b6dceb79-b806-4504-bc30-70497679c75d-socket-dir\") pod \"csi-hostpathplugin-f2jgv\" (UID: \"b6dceb79-b806-4504-bc30-70497679c75d\") " pod="hostpath-provisioner/csi-hostpathplugin-f2jgv" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.144763 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/67a65cf0-6dcb-4730-a43b-0be90f5c8a93-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-g7gbn\" (UID: 
\"67a65cf0-6dcb-4730-a43b-0be90f5c8a93\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-g7gbn" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.158990 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.178528 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.187557 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4167d110-2211-4862-af3d-b6b4a88a0bfd-config\") pod \"route-controller-manager-6576b87f9c-75d7z\" (UID: \"4167d110-2211-4862-af3d-b6b4a88a0bfd\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-75d7z" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.194156 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-8gkzf"] Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.198440 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.217484 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.239957 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.241457 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:19 crc kubenswrapper[4701]: E1121 19:03:19.242154 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:19.742118389 +0000 UTC m=+90.527258436 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.242846 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:19 crc kubenswrapper[4701]: E1121 19:03:19.244162 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-21 19:03:19.744142594 +0000 UTC m=+90.529282641 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.258596 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.267820 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4167d110-2211-4862-af3d-b6b4a88a0bfd-serving-cert\") pod \"route-controller-manager-6576b87f9c-75d7z\" (UID: \"4167d110-2211-4862-af3d-b6b4a88a0bfd\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-75d7z" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.278302 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.299127 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.309330 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4167d110-2211-4862-af3d-b6b4a88a0bfd-client-ca\") pod \"route-controller-manager-6576b87f9c-75d7z\" (UID: \"4167d110-2211-4862-af3d-b6b4a88a0bfd\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-75d7z" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.318515 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.325279 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aaed9de5-8fa2-4493-ab19-2a79c17c6241-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-v5bsv\" (UID: \"aaed9de5-8fa2-4493-ab19-2a79c17c6241\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-v5bsv" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.337948 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.353765 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:19 crc kubenswrapper[4701]: E1121 19:03:19.353978 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-21 19:03:19.853943981 +0000 UTC m=+90.639084048 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.354810 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:19 crc kubenswrapper[4701]: E1121 19:03:19.355560 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:19.855527436 +0000 UTC m=+90.640667503 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.360005 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.362292 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aaed9de5-8fa2-4493-ab19-2a79c17c6241-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-v5bsv\" (UID: \"aaed9de5-8fa2-4493-ab19-2a79c17c6241\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-v5bsv" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.378848 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.398868 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.419259 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.425297 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:19 crc 
kubenswrapper[4701]: I1121 19:03:19.439378 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.456853 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:19 crc kubenswrapper[4701]: E1121 19:03:19.457043 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:19.957011721 +0000 UTC m=+90.742151758 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.458536 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.458786 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 21 19:03:19 crc kubenswrapper[4701]: E1121 19:03:19.458938 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:19.958928182 +0000 UTC m=+90.744068219 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.469517 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.479519 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.485819 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.515550 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.526535 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.526560 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.538563 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.538746 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.550312 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.558613 4701 reflector.go:368] Caches populated for 
*v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.560144 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:19 crc kubenswrapper[4701]: E1121 19:03:19.561343 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:20.061313138 +0000 UTC m=+90.846453355 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.561508 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:19 crc kubenswrapper[4701]: E1121 19:03:19.562257 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:20.062246798 +0000 UTC m=+90.847386835 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.570796 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.578787 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.599567 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.623299 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.629638 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.634690 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.638597 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.642149 4701 generic.go:334] "Generic (PLEG): container finished" podID="3f95c2f8-00de-4a99-a573-3c5ccea86d5f" containerID="f10a70ec71edd61fd1a7388a6a09c0be1107ce2db85bc3782680d8c4d51589f5" exitCode=0 Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.642428 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" event={"ID":"3f95c2f8-00de-4a99-a573-3c5ccea86d5f","Type":"ContainerDied","Data":"f10a70ec71edd61fd1a7388a6a09c0be1107ce2db85bc3782680d8c4d51589f5"} Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.642539 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" event={"ID":"3f95c2f8-00de-4a99-a573-3c5ccea86d5f","Type":"ContainerStarted","Data":"7c1a27d5d48f2b055da806e9307a012c1c27d4a34e2f1c8672066c56503b0628"} Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.644338 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.662902 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:19 crc kubenswrapper[4701]: E1121 19:03:19.664335 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:20.164307357 +0000 UTC m=+90.949447414 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.670053 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.675691 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.679337 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.699817 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.720652 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.721574 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a2f5b911-dc9c-4009-a4b0-da201a34f156-audit-policies\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.739544 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.760229 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.765482 4701 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:19 crc kubenswrapper[4701]: E1121 19:03:19.766485 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:20.266465277 +0000 UTC m=+91.051605414 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.772421 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/3dc5a393-ee87-4a1c-b786-9523a05de343-metrics-tls\") pod \"dns-operator-744455d44c-2b8wr\" (UID: \"3dc5a393-ee87-4a1c-b786-9523a05de343\") " pod="openshift-dns-operator/dns-operator-744455d44c-2b8wr" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.776436 4701 request.go:700] Waited for 1.012085127s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-dns-operator/configmaps?fieldSelector=metadata.name%3Dopenshift-service-ca.crt&limit=500&resourceVersion=0 Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.778646 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.799018 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.818741 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.838710 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.852393 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8b089186-0669-456a-bea2-60b001261161-serving-cert\") pod \"authentication-operator-69f744f599-rfbjh\" (UID: \"8b089186-0669-456a-bea2-60b001261161\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rfbjh" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.866087 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:19 crc 
kubenswrapper[4701]: E1121 19:03:19.866346 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:20.366297796 +0000 UTC m=+91.151437823 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.867023 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:19 crc kubenswrapper[4701]: E1121 19:03:19.867648 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:20.367621775 +0000 UTC m=+91.152762022 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.868952 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.879332 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.885233 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8b089186-0669-456a-bea2-60b001261161-service-ca-bundle\") pod \"authentication-operator-69f744f599-rfbjh\" (UID: \"8b089186-0669-456a-bea2-60b001261161\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rfbjh" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.899148 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.919382 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.939819 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.949935 4701 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/f741f928-61fd-41d5-b8c8-879a4744fa2e-images\") pod \"machine-api-operator-5694c8668f-z6z69\" (UID: \"f741f928-61fd-41d5-b8c8-879a4744fa2e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-z6z69" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.950305 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.950378 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.959019 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.969376 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:19 crc kubenswrapper[4701]: E1121 19:03:19.970498 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:20.470477721 +0000 UTC m=+91.255617748 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.980482 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.983377 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/f741f928-61fd-41d5-b8c8-879a4744fa2e-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-z6z69\" (UID: \"f741f928-61fd-41d5-b8c8-879a4744fa2e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-z6z69" Nov 21 19:03:19 crc kubenswrapper[4701]: I1121 19:03:19.998311 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.009991 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f741f928-61fd-41d5-b8c8-879a4744fa2e-config\") pod \"machine-api-operator-5694c8668f-z6z69\" (UID: \"f741f928-61fd-41d5-b8c8-879a4744fa2e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-z6z69" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.019282 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" 
Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.031959 4701 secret.go:188] Couldn't get secret openshift-kube-scheduler-operator/kube-scheduler-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.032397 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/601c0380-cc9d-4363-94be-92be6aeb94ac-serving-cert podName:601c0380-cc9d-4363-94be-92be6aeb94ac nodeName:}" failed. No retries permitted until 2025-11-21 19:03:20.532359322 +0000 UTC m=+91.317499389 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/601c0380-cc9d-4363-94be-92be6aeb94ac-serving-cert") pod "openshift-kube-scheduler-operator-5fdd9b5758-4kqhw" (UID: "601c0380-cc9d-4363-94be-92be6aeb94ac") : failed to sync secret cache: timed out waiting for the condition Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.034181 4701 secret.go:188] Couldn't get secret openshift-cluster-machine-approver/machine-approver-tls: failed to sync secret cache: timed out waiting for the condition Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.034242 4701 secret.go:188] Couldn't get secret openshift-apiserver-operator/openshift-apiserver-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.034310 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4f9fb2aa-8dfa-47d7-8e35-3b6267d85ef7-serving-cert podName:4f9fb2aa-8dfa-47d7-8e35-3b6267d85ef7 nodeName:}" failed. No retries permitted until 2025-11-21 19:03:20.534290544 +0000 UTC m=+91.319430591 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/4f9fb2aa-8dfa-47d7-8e35-3b6267d85ef7-serving-cert") pod "openshift-apiserver-operator-796bbdcf4f-nzzw5" (UID: "4f9fb2aa-8dfa-47d7-8e35-3b6267d85ef7") : failed to sync secret cache: timed out waiting for the condition Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.034355 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1da8a1e4-422a-45dc-aa36-ae559ea1dc14-machine-approver-tls podName:1da8a1e4-422a-45dc-aa36-ae559ea1dc14 nodeName:}" failed. No retries permitted until 2025-11-21 19:03:20.534324385 +0000 UTC m=+91.319464412 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "machine-approver-tls" (UniqueName: "kubernetes.io/secret/1da8a1e4-422a-45dc-aa36-ae559ea1dc14-machine-approver-tls") pod "machine-approver-56656f9798-hcwlb" (UID: "1da8a1e4-422a-45dc-aa36-ae559ea1dc14") : failed to sync secret cache: timed out waiting for the condition Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.034743 4701 configmap.go:193] Couldn't get configMap openshift-machine-config-operator/machine-config-operator-images: failed to sync configmap cache: timed out waiting for the condition Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.034985 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9c46cea4-80aa-4a56-9370-0b7f5331c1ee-images podName:9c46cea4-80aa-4a56-9370-0b7f5331c1ee nodeName:}" failed. No retries permitted until 2025-11-21 19:03:20.534960679 +0000 UTC m=+91.320100746 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/9c46cea4-80aa-4a56-9370-0b7f5331c1ee-images") pod "machine-config-operator-74547568cd-w4fcr" (UID: "9c46cea4-80aa-4a56-9370-0b7f5331c1ee") : failed to sync configmap cache: timed out waiting for the condition Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.035526 4701 secret.go:188] Couldn't get secret openshift-machine-config-operator/mco-proxy-tls: failed to sync secret cache: timed out waiting for the condition Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.035585 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9c46cea4-80aa-4a56-9370-0b7f5331c1ee-proxy-tls podName:9c46cea4-80aa-4a56-9370-0b7f5331c1ee nodeName:}" failed. No retries permitted until 2025-11-21 19:03:20.535571902 +0000 UTC m=+91.320711939 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/9c46cea4-80aa-4a56-9370-0b7f5331c1ee-proxy-tls") pod "machine-config-operator-74547568cd-w4fcr" (UID: "9c46cea4-80aa-4a56-9370-0b7f5331c1ee") : failed to sync secret cache: timed out waiting for the condition Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.037913 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.038570 4701 configmap.go:193] Couldn't get configMap openshift-cluster-machine-approver/kube-rbac-proxy: failed to sync configmap cache: timed out waiting for the condition Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.038786 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1da8a1e4-422a-45dc-aa36-ae559ea1dc14-auth-proxy-config podName:1da8a1e4-422a-45dc-aa36-ae559ea1dc14 nodeName:}" failed. No retries permitted until 2025-11-21 19:03:20.538764911 +0000 UTC m=+91.323904978 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "auth-proxy-config" (UniqueName: "kubernetes.io/configmap/1da8a1e4-422a-45dc-aa36-ae559ea1dc14-auth-proxy-config") pod "machine-approver-56656f9798-hcwlb" (UID: "1da8a1e4-422a-45dc-aa36-ae559ea1dc14") : failed to sync configmap cache: timed out waiting for the condition Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.038984 4701 configmap.go:193] Couldn't get configMap openshift-kube-controller-manager-operator/kube-controller-manager-operator-config: failed to sync configmap cache: timed out waiting for the condition Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.039146 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6f9560d6-8ab5-46f8-bd69-7bca42610547-config podName:6f9560d6-8ab5-46f8-bd69-7bca42610547 nodeName:}" failed. No retries permitted until 2025-11-21 19:03:20.53912967 +0000 UTC m=+91.324269737 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/6f9560d6-8ab5-46f8-bd69-7bca42610547-config") pod "kube-controller-manager-operator-78b949d7b-9rlms" (UID: "6f9560d6-8ab5-46f8-bd69-7bca42610547") : failed to sync configmap cache: timed out waiting for the condition Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.039347 4701 configmap.go:193] Couldn't get configMap openshift-cluster-machine-approver/machine-approver-config: failed to sync configmap cache: timed out waiting for the condition Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.039394 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1da8a1e4-422a-45dc-aa36-ae559ea1dc14-config podName:1da8a1e4-422a-45dc-aa36-ae559ea1dc14 nodeName:}" failed. No retries permitted until 2025-11-21 19:03:20.539384066 +0000 UTC m=+91.324524083 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/1da8a1e4-422a-45dc-aa36-ae559ea1dc14-config") pod "machine-approver-56656f9798-hcwlb" (UID: "1da8a1e4-422a-45dc-aa36-ae559ea1dc14") : failed to sync configmap cache: timed out waiting for the condition Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.039445 4701 configmap.go:193] Couldn't get configMap openshift-apiserver-operator/openshift-apiserver-operator-config: failed to sync configmap cache: timed out waiting for the condition Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.039475 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f9fb2aa-8dfa-47d7-8e35-3b6267d85ef7-config podName:4f9fb2aa-8dfa-47d7-8e35-3b6267d85ef7 nodeName:}" failed. No retries permitted until 2025-11-21 19:03:20.539465967 +0000 UTC m=+91.324605984 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/4f9fb2aa-8dfa-47d7-8e35-3b6267d85ef7-config") pod "openshift-apiserver-operator-796bbdcf4f-nzzw5" (UID: "4f9fb2aa-8dfa-47d7-8e35-3b6267d85ef7") : failed to sync configmap cache: timed out waiting for the condition Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.039782 4701 configmap.go:193] Couldn't get configMap openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-config: failed to sync configmap cache: timed out waiting for the condition Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.039839 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/601c0380-cc9d-4363-94be-92be6aeb94ac-config podName:601c0380-cc9d-4363-94be-92be6aeb94ac nodeName:}" failed. No retries permitted until 2025-11-21 19:03:20.539809615 +0000 UTC m=+91.324949642 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/601c0380-cc9d-4363-94be-92be6aeb94ac-config") pod "openshift-kube-scheduler-operator-5fdd9b5758-4kqhw" (UID: "601c0380-cc9d-4363-94be-92be6aeb94ac") : failed to sync configmap cache: timed out waiting for the condition Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.040879 4701 secret.go:188] Couldn't get secret openshift-kube-controller-manager-operator/kube-controller-manager-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.040942 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6f9560d6-8ab5-46f8-bd69-7bca42610547-serving-cert podName:6f9560d6-8ab5-46f8-bd69-7bca42610547 nodeName:}" failed. No retries permitted until 2025-11-21 19:03:20.540927109 +0000 UTC m=+91.326067146 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/6f9560d6-8ab5-46f8-bd69-7bca42610547-serving-cert") pod "kube-controller-manager-operator-78b949d7b-9rlms" (UID: "6f9560d6-8ab5-46f8-bd69-7bca42610547") : failed to sync secret cache: timed out waiting for the condition Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.058774 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.071830 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.072363 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:20.572341034 +0000 UTC m=+91.357481061 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.078726 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.088765 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8b089186-0669-456a-bea2-60b001261161-config\") pod \"authentication-operator-69f744f599-rfbjh\" (UID: \"8b089186-0669-456a-bea2-60b001261161\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rfbjh" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.089012 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8b089186-0669-456a-bea2-60b001261161-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-rfbjh\" (UID: \"8b089186-0669-456a-bea2-60b001261161\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rfbjh" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.101125 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.117850 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.138859 4701 configmap.go:193] Couldn't get configMap openshift-dns/dns-default: failed to sync configmap cache: timed out waiting for the condition Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.138929 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4a518559-4b60-4e05-b0e1-7b2ef4b30817-config-volume podName:4a518559-4b60-4e05-b0e1-7b2ef4b30817 nodeName:}" failed. No retries permitted until 2025-11-21 19:03:20.638911688 +0000 UTC m=+91.424051705 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/4a518559-4b60-4e05-b0e1-7b2ef4b30817-config-volume") pod "dns-default-9d774" (UID: "4a518559-4b60-4e05-b0e1-7b2ef4b30817") : failed to sync configmap cache: timed out waiting for the condition Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.139039 4701 secret.go:188] Couldn't get secret openshift-machine-config-operator/machine-config-server-tls: failed to sync secret cache: timed out waiting for the condition Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.139064 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bbb8093d-f6ea-471b-a855-f7c6d5412f7b-certs podName:bbb8093d-f6ea-471b-a855-f7c6d5412f7b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:20.639057891 +0000 UTC m=+91.424197918 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "certs" (UniqueName: "kubernetes.io/secret/bbb8093d-f6ea-471b-a855-f7c6d5412f7b-certs") pod "machine-config-server-wlgv5" (UID: "bbb8093d-f6ea-471b-a855-f7c6d5412f7b") : failed to sync secret cache: timed out waiting for the condition Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.139097 4701 secret.go:188] Couldn't get secret openshift-dns/dns-default-metrics-tls: failed to sync secret cache: timed out waiting for the condition Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.139123 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4a518559-4b60-4e05-b0e1-7b2ef4b30817-metrics-tls podName:4a518559-4b60-4e05-b0e1-7b2ef4b30817 nodeName:}" failed. No retries permitted until 2025-11-21 19:03:20.639113722 +0000 UTC m=+91.424253749 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/4a518559-4b60-4e05-b0e1-7b2ef4b30817-metrics-tls") pod "dns-default-9d774" (UID: "4a518559-4b60-4e05-b0e1-7b2ef4b30817") : failed to sync secret cache: timed out waiting for the condition Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.139137 4701 secret.go:188] Couldn't get secret openshift-ingress-canary/canary-serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.139164 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4fb5ab32-dc0b-40c0-8af0-7cffba411a22-cert podName:4fb5ab32-dc0b-40c0-8af0-7cffba411a22 nodeName:}" failed. No retries permitted until 2025-11-21 19:03:20.639158013 +0000 UTC m=+91.424298040 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/4fb5ab32-dc0b-40c0-8af0-7cffba411a22-cert") pod "ingress-canary-2hzx9" (UID: "4fb5ab32-dc0b-40c0-8af0-7cffba411a22") : failed to sync secret cache: timed out waiting for the condition Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.139178 4701 secret.go:188] Couldn't get secret openshift-machine-config-operator/node-bootstrapper-token: failed to sync secret cache: timed out waiting for the condition Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.139225 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bbb8093d-f6ea-471b-a855-f7c6d5412f7b-node-bootstrap-token podName:bbb8093d-f6ea-471b-a855-f7c6d5412f7b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:20.639201334 +0000 UTC m=+91.424341361 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "node-bootstrap-token" (UniqueName: "kubernetes.io/secret/bbb8093d-f6ea-471b-a855-f7c6d5412f7b-node-bootstrap-token") pod "machine-config-server-wlgv5" (UID: "bbb8093d-f6ea-471b-a855-f7c6d5412f7b") : failed to sync secret cache: timed out waiting for the condition Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.140494 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.158278 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.177263 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.177675 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:20.677635374 +0000 UTC m=+91.462775431 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.179886 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.199655 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.219455 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.238087 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.258238 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.278283 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.279605 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.280025 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:20.780007049 +0000 UTC m=+91.565147076 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.299173 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.318616 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.339254 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.359599 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.383757 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.383887 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.384009 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:20.883982889 +0000 UTC m=+91.669122926 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.384502 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.384949 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:20.884938609 +0000 UTC m=+91.670078636 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.400048 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.420636 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.439454 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.480497 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l54lr\" (UniqueName: \"kubernetes.io/projected/ca30ea3d-b3d5-478c-a154-284e721664d7-kube-api-access-l54lr\") pod \"kube-storage-version-migrator-operator-b67b599dd-n6dgf\" (UID: \"ca30ea3d-b3d5-478c-a154-284e721664d7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-n6dgf" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.485921 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.486705 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-21 19:03:20.986690191 +0000 UTC m=+91.771830218 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.499074 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/7ae80b08-d9c6-48e0-8fee-ce6cbff3b7e3-bound-sa-token\") pod \"ingress-operator-5b745b69d9-zfhdw\" (UID: \"7ae80b08-d9c6-48e0-8fee-ce6cbff3b7e3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zfhdw" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.513497 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rxnqh\" (UniqueName: \"kubernetes.io/projected/7ae80b08-d9c6-48e0-8fee-ce6cbff3b7e3-kube-api-access-rxnqh\") pod \"ingress-operator-5b745b69d9-zfhdw\" (UID: \"7ae80b08-d9c6-48e0-8fee-ce6cbff3b7e3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zfhdw" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.519450 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.538989 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.559813 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.565849 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-n6dgf" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.580600 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.589423 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/1da8a1e4-422a-45dc-aa36-ae559ea1dc14-machine-approver-tls\") pod \"machine-approver-56656f9798-hcwlb\" (UID: \"1da8a1e4-422a-45dc-aa36-ae559ea1dc14\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hcwlb" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.589640 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/9c46cea4-80aa-4a56-9370-0b7f5331c1ee-images\") pod \"machine-config-operator-74547568cd-w4fcr\" (UID: \"9c46cea4-80aa-4a56-9370-0b7f5331c1ee\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-w4fcr" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.589782 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/9c46cea4-80aa-4a56-9370-0b7f5331c1ee-proxy-tls\") pod \"machine-config-operator-74547568cd-w4fcr\" (UID: \"9c46cea4-80aa-4a56-9370-0b7f5331c1ee\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-w4fcr" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.589830 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.589886 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/601c0380-cc9d-4363-94be-92be6aeb94ac-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-4kqhw\" (UID: \"601c0380-cc9d-4363-94be-92be6aeb94ac\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4kqhw" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.589960 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6f9560d6-8ab5-46f8-bd69-7bca42610547-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-9rlms\" (UID: \"6f9560d6-8ab5-46f8-bd69-7bca42610547\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9rlms" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.590161 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f9560d6-8ab5-46f8-bd69-7bca42610547-config\") pod \"kube-controller-manager-operator-78b949d7b-9rlms\" (UID: \"6f9560d6-8ab5-46f8-bd69-7bca42610547\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9rlms" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.590212 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/1da8a1e4-422a-45dc-aa36-ae559ea1dc14-config\") pod \"machine-approver-56656f9798-hcwlb\" (UID: \"1da8a1e4-422a-45dc-aa36-ae559ea1dc14\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hcwlb" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.590290 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1da8a1e4-422a-45dc-aa36-ae559ea1dc14-auth-proxy-config\") pod \"machine-approver-56656f9798-hcwlb\" (UID: \"1da8a1e4-422a-45dc-aa36-ae559ea1dc14\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hcwlb" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.590341 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f9fb2aa-8dfa-47d7-8e35-3b6267d85ef7-config\") pod \"openshift-apiserver-operator-796bbdcf4f-nzzw5\" (UID: \"4f9fb2aa-8dfa-47d7-8e35-3b6267d85ef7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nzzw5" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.590429 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/601c0380-cc9d-4363-94be-92be6aeb94ac-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-4kqhw\" (UID: \"601c0380-cc9d-4363-94be-92be6aeb94ac\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4kqhw" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.590761 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4f9fb2aa-8dfa-47d7-8e35-3b6267d85ef7-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-nzzw5\" (UID: \"4f9fb2aa-8dfa-47d7-8e35-3b6267d85ef7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nzzw5" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.591620 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/601c0380-cc9d-4363-94be-92be6aeb94ac-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-4kqhw\" (UID: \"601c0380-cc9d-4363-94be-92be6aeb94ac\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4kqhw" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.592587 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/9c46cea4-80aa-4a56-9370-0b7f5331c1ee-images\") pod \"machine-config-operator-74547568cd-w4fcr\" (UID: \"9c46cea4-80aa-4a56-9370-0b7f5331c1ee\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-w4fcr" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.593744 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1da8a1e4-422a-45dc-aa36-ae559ea1dc14-config\") pod \"machine-approver-56656f9798-hcwlb\" (UID: \"1da8a1e4-422a-45dc-aa36-ae559ea1dc14\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hcwlb" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.594269 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f9560d6-8ab5-46f8-bd69-7bca42610547-config\") pod \"kube-controller-manager-operator-78b949d7b-9rlms\" (UID: 
\"6f9560d6-8ab5-46f8-bd69-7bca42610547\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9rlms" Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.594526 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:21.094512405 +0000 UTC m=+91.879652432 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.595433 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f9fb2aa-8dfa-47d7-8e35-3b6267d85ef7-config\") pod \"openshift-apiserver-operator-796bbdcf4f-nzzw5\" (UID: \"4f9fb2aa-8dfa-47d7-8e35-3b6267d85ef7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nzzw5" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.595913 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/1da8a1e4-422a-45dc-aa36-ae559ea1dc14-machine-approver-tls\") pod \"machine-approver-56656f9798-hcwlb\" (UID: \"1da8a1e4-422a-45dc-aa36-ae559ea1dc14\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hcwlb" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.596006 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4f9fb2aa-8dfa-47d7-8e35-3b6267d85ef7-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-nzzw5\" (UID: \"4f9fb2aa-8dfa-47d7-8e35-3b6267d85ef7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nzzw5" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.596090 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1da8a1e4-422a-45dc-aa36-ae559ea1dc14-auth-proxy-config\") pod \"machine-approver-56656f9798-hcwlb\" (UID: \"1da8a1e4-422a-45dc-aa36-ae559ea1dc14\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hcwlb" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.598763 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.599995 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/9c46cea4-80aa-4a56-9370-0b7f5331c1ee-proxy-tls\") pod \"machine-config-operator-74547568cd-w4fcr\" (UID: \"9c46cea4-80aa-4a56-9370-0b7f5331c1ee\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-w4fcr" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.600558 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/601c0380-cc9d-4363-94be-92be6aeb94ac-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-4kqhw\" (UID: 
\"601c0380-cc9d-4363-94be-92be6aeb94ac\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4kqhw" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.601092 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6f9560d6-8ab5-46f8-bd69-7bca42610547-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-9rlms\" (UID: \"6f9560d6-8ab5-46f8-bd69-7bca42610547\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9rlms" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.618052 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.638935 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.642826 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zfhdw" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.656999 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" event={"ID":"3f95c2f8-00de-4a99-a573-3c5ccea86d5f","Type":"ContainerStarted","Data":"b2b4bb5e83b2e3be4dcebd4a04600b2ac7563eda814245f852909ec191a98f03"} Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.657091 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" event={"ID":"3f95c2f8-00de-4a99-a573-3c5ccea86d5f","Type":"ContainerStarted","Data":"8f18e60bcf61b14793476855dafa6c101ef642a3a53de216b512bd9067d6a6f9"} Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.677765 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.679798 4701 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.692094 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.692382 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/bbb8093d-f6ea-471b-a855-f7c6d5412f7b-certs\") pod \"machine-config-server-wlgv5\" (UID: \"bbb8093d-f6ea-471b-a855-f7c6d5412f7b\") " pod="openshift-machine-config-operator/machine-config-server-wlgv5" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.692477 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4a518559-4b60-4e05-b0e1-7b2ef4b30817-config-volume\") pod \"dns-default-9d774\" (UID: \"4a518559-4b60-4e05-b0e1-7b2ef4b30817\") " pod="openshift-dns/dns-default-9d774" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.692734 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4fb5ab32-dc0b-40c0-8af0-7cffba411a22-cert\") pod 
\"ingress-canary-2hzx9\" (UID: \"4fb5ab32-dc0b-40c0-8af0-7cffba411a22\") " pod="openshift-ingress-canary/ingress-canary-2hzx9" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.692781 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/bbb8093d-f6ea-471b-a855-f7c6d5412f7b-node-bootstrap-token\") pod \"machine-config-server-wlgv5\" (UID: \"bbb8093d-f6ea-471b-a855-f7c6d5412f7b\") " pod="openshift-machine-config-operator/machine-config-server-wlgv5" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.692818 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/4a518559-4b60-4e05-b0e1-7b2ef4b30817-metrics-tls\") pod \"dns-default-9d774\" (UID: \"4a518559-4b60-4e05-b0e1-7b2ef4b30817\") " pod="openshift-dns/dns-default-9d774" Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.693354 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:21.193320772 +0000 UTC m=+91.978460819 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.695324 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4a518559-4b60-4e05-b0e1-7b2ef4b30817-config-volume\") pod \"dns-default-9d774\" (UID: \"4a518559-4b60-4e05-b0e1-7b2ef4b30817\") " pod="openshift-dns/dns-default-9d774" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.716463 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.717181 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/4a518559-4b60-4e05-b0e1-7b2ef4b30817-metrics-tls\") pod \"dns-default-9d774\" (UID: \"4a518559-4b60-4e05-b0e1-7b2ef4b30817\") " pod="openshift-dns/dns-default-9d774" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.717348 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4fb5ab32-dc0b-40c0-8af0-7cffba411a22-cert\") pod \"ingress-canary-2hzx9\" (UID: \"4fb5ab32-dc0b-40c0-8af0-7cffba411a22\") " pod="openshift-ingress-canary/ingress-canary-2hzx9" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.750372 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bfe14e44-05d7-460b-b4aa-462d435e8c62-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-zwplq\" (UID: \"bfe14e44-05d7-460b-b4aa-462d435e8c62\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-zwplq" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.759148 4701 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-whtlr\" (UniqueName: \"kubernetes.io/projected/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-kube-api-access-whtlr\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.776542 4701 request.go:700] Waited for 1.85825914s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-operator-lifecycle-manager/serviceaccounts/olm-operator-serviceaccount/token Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.790301 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h6fsx\" (UniqueName: \"kubernetes.io/projected/d04e099e-931d-4fe3-9d85-196a1d40ebd8-kube-api-access-h6fsx\") pod \"router-default-5444994796-hsngh\" (UID: \"d04e099e-931d-4fe3-9d85-196a1d40ebd8\") " pod="openshift-ingress/router-default-5444994796-hsngh" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.795275 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.796166 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:21.296147667 +0000 UTC m=+92.081287794 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.797953 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n7ppl\" (UniqueName: \"kubernetes.io/projected/dd5d82de-78d8-4e8e-93f3-fe6ede598dce-kube-api-access-n7ppl\") pod \"catalog-operator-68c6474976-s9t62\" (UID: \"dd5d82de-78d8-4e8e-93f3-fe6ede598dce\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s9t62" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.815245 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jrbj5\" (UniqueName: \"kubernetes.io/projected/4b8139c8-66be-4f40-a084-aa26d58554bb-kube-api-access-jrbj5\") pod \"controller-manager-879f6c89f-gdj2w\" (UID: \"4b8139c8-66be-4f40-a084-aa26d58554bb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gdj2w" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.831380 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dzpsn\" (UniqueName: \"kubernetes.io/projected/9d183ad2-4424-442f-a662-3572267b54fd-kube-api-access-dzpsn\") pod \"etcd-operator-b45778765-gzwfn\" (UID: \"9d183ad2-4424-442f-a662-3572267b54fd\") " pod="openshift-etcd-operator/etcd-operator-b45778765-gzwfn" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.844664 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-n6dgf"] Nov 21 19:03:20 crc kubenswrapper[4701]: W1121 19:03:20.854796 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podca30ea3d_b3d5_478c_a154_284e721664d7.slice/crio-ed3e777bb23ad795ec33ef8542ced0ec4545cd008572a5a3b84881153f68b271 WatchSource:0}: Error finding container ed3e777bb23ad795ec33ef8542ced0ec4545cd008572a5a3b84881153f68b271: Status 404 returned error can't find the container with id ed3e777bb23ad795ec33ef8542ced0ec4545cd008572a5a3b84881153f68b271 Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.858115 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-bound-sa-token\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.859070 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-hsngh" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.873556 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-zwplq" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.878250 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4zkb9\" (UniqueName: \"kubernetes.io/projected/8a478166-ab28-4186-a9c2-f079c8b2f2d2-kube-api-access-4zkb9\") pod \"openshift-config-operator-7777fb866f-zsvsv\" (UID: \"8a478166-ab28-4186-a9c2-f079c8b2f2d2\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zsvsv" Nov 21 19:03:20 crc kubenswrapper[4701]: W1121 19:03:20.885810 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd04e099e_931d_4fe3_9d85_196a1d40ebd8.slice/crio-ad2eca92b43c738527037f853c8d92df387abe7c40466c157a23a4af64c137a2 WatchSource:0}: Error finding container ad2eca92b43c738527037f853c8d92df387abe7c40466c157a23a4af64c137a2: Status 404 returned error can't find the container with id ad2eca92b43c738527037f853c8d92df387abe7c40466c157a23a4af64c137a2 Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.893254 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2fhp8\" (UniqueName: \"kubernetes.io/projected/cfa749f0-83ce-4ba6-8a3e-e43257bdb907-kube-api-access-2fhp8\") pod \"multus-admission-controller-857f4d67dd-scrql\" (UID: \"cfa749f0-83ce-4ba6-8a3e-e43257bdb907\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-scrql" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.902472 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-zfhdw"] Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.903656 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:20 crc kubenswrapper[4701]: E1121 19:03:20.904763 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:21.404741278 +0000 UTC m=+92.189881305 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.924246 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s9t62" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.925979 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t9bfs\" (UniqueName: \"kubernetes.io/projected/6f35a6f5-6bdc-44cc-9314-8d0f90e1edfc-kube-api-access-t9bfs\") pod \"console-operator-58897d9998-7zd2w\" (UID: \"6f35a6f5-6bdc-44cc-9314-8d0f90e1edfc\") " pod="openshift-console-operator/console-operator-58897d9998-7zd2w" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.935178 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qr8jm\" (UniqueName: \"kubernetes.io/projected/6469e01b-cfc6-4ec9-87de-29c6eeee136f-kube-api-access-qr8jm\") pod \"control-plane-machine-set-operator-78cbb6b69f-l7w9b\" (UID: \"6469e01b-cfc6-4ec9-87de-29c6eeee136f\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l7w9b" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.952301 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-scrql" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.958838 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sx95c\" (UniqueName: \"kubernetes.io/projected/12c6bbad-70a1-41c6-a818-d9ec535873e3-kube-api-access-sx95c\") pod \"cluster-image-registry-operator-dc59b4c8b-pz8rd\" (UID: \"12c6bbad-70a1-41c6-a818-d9ec535873e3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pz8rd" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.972501 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/12c6bbad-70a1-41c6-a818-d9ec535873e3-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-pz8rd\" (UID: \"12c6bbad-70a1-41c6-a818-d9ec535873e3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pz8rd" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.978228 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.994641 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/bbb8093d-f6ea-471b-a855-f7c6d5412f7b-node-bootstrap-token\") pod \"machine-config-server-wlgv5\" (UID: \"bbb8093d-f6ea-471b-a855-f7c6d5412f7b\") " pod="openshift-machine-config-operator/machine-config-server-wlgv5" Nov 21 19:03:20 crc kubenswrapper[4701]: I1121 19:03:20.997369 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.007014 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:21 crc kubenswrapper[4701]: E1121 19:03:21.007550 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: 
nodeName:}" failed. No retries permitted until 2025-11-21 19:03:21.507529652 +0000 UTC m=+92.292669789 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.008379 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/bbb8093d-f6ea-471b-a855-f7c6d5412f7b-certs\") pod \"machine-config-server-wlgv5\" (UID: \"bbb8093d-f6ea-471b-a855-f7c6d5412f7b\") " pod="openshift-machine-config-operator/machine-config-server-wlgv5" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.022452 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.024169 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-gdj2w" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.031397 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pz8rd" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.049320 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.050207 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-7zd2w" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.063363 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.087724 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.094880 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-gzwfn" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.102565 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-zwplq"] Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.102749 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.108895 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:21 crc kubenswrapper[4701]: E1121 19:03:21.109457 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:21.609440247 +0000 UTC m=+92.394580274 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.127777 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zsvsv" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.156036 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mtm8m\" (UniqueName: \"kubernetes.io/projected/b3e75990-afff-41bb-a78e-3d04223bbb6c-kube-api-access-mtm8m\") pod \"marketplace-operator-79b997595-v6w6b\" (UID: \"b3e75990-afff-41bb-a78e-3d04223bbb6c\") " pod="openshift-marketplace/marketplace-operator-79b997595-v6w6b" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.178759 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pzz8d\" (UniqueName: \"kubernetes.io/projected/276e2cb3-e02e-4122-b10b-a454198b7954-kube-api-access-pzz8d\") pod \"collect-profiles-29395860-72qpb\" (UID: \"276e2cb3-e02e-4122-b10b-a454198b7954\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395860-72qpb" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.190902 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l7w9b" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.196623 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qb2v5\" (UniqueName: \"kubernetes.io/projected/7e4a0b68-874d-4da2-9286-12b84e37e090-kube-api-access-qb2v5\") pod \"package-server-manager-789f6589d5-twqqt\" (UID: \"7e4a0b68-874d-4da2-9286-12b84e37e090\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-twqqt" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.213055 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s9t62"] Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.213759 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:21 crc kubenswrapper[4701]: E1121 19:03:21.214159 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:21.714147843 +0000 UTC m=+92.499287870 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.215908 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rt2mw\" (UniqueName: \"kubernetes.io/projected/a2f5b911-dc9c-4009-a4b0-da201a34f156-kube-api-access-rt2mw\") pod \"oauth-openshift-558db77b4-twmdp\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.227931 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-scrql"] Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.258037 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4sw8h\" (UniqueName: \"kubernetes.io/projected/f7def574-9941-4933-83df-3f20df5797d4-kube-api-access-4sw8h\") pod \"console-f9d7485db-cwddx\" (UID: \"f7def574-9941-4933-83df-3f20df5797d4\") " pod="openshift-console/console-f9d7485db-cwddx" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.267166 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-twqqt" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.276958 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w9tk4\" (UniqueName: \"kubernetes.io/projected/3dc5a393-ee87-4a1c-b786-9523a05de343-kube-api-access-w9tk4\") pod \"dns-operator-744455d44c-2b8wr\" (UID: \"3dc5a393-ee87-4a1c-b786-9523a05de343\") " pod="openshift-dns-operator/dns-operator-744455d44c-2b8wr" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.284188 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-v6w6b" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.285154 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gk6v9\" (UniqueName: \"kubernetes.io/projected/5d7aff3f-cf94-411b-b23b-c91f58cdc2f6-kube-api-access-gk6v9\") pod \"downloads-7954f5f757-hbp5b\" (UID: \"5d7aff3f-cf94-411b-b23b-c91f58cdc2f6\") " pod="openshift-console/downloads-7954f5f757-hbp5b" Nov 21 19:03:21 crc kubenswrapper[4701]: W1121 19:03:21.290925 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddd5d82de_78d8_4e8e_93f3_fe6ede598dce.slice/crio-eb09ae87454486ad524982de3a738423c8f15587a8166bdf2eb71e83e3b3a314 WatchSource:0}: Error finding container eb09ae87454486ad524982de3a738423c8f15587a8166bdf2eb71e83e3b3a314: Status 404 returned error can't find the container with id eb09ae87454486ad524982de3a738423c8f15587a8166bdf2eb71e83e3b3a314 Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.291125 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395860-72qpb" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.293117 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pz8rd"] Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.314393 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:21 crc kubenswrapper[4701]: E1121 19:03:21.314919 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:21.814902623 +0000 UTC m=+92.600042650 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.320273 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8jhcz\" (UniqueName: \"kubernetes.io/projected/2ea15e3b-bc7e-491a-8cd7-4275d33abf23-kube-api-access-8jhcz\") pod \"service-ca-9c57cc56f-zmch7\" (UID: \"2ea15e3b-bc7e-491a-8cd7-4275d33abf23\") " pod="openshift-service-ca/service-ca-9c57cc56f-zmch7" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.321382 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wtj8l\" (UniqueName: \"kubernetes.io/projected/9c46cea4-80aa-4a56-9370-0b7f5331c1ee-kube-api-access-wtj8l\") pod \"machine-config-operator-74547568cd-w4fcr\" (UID: \"9c46cea4-80aa-4a56-9370-0b7f5331c1ee\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-w4fcr" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.323066 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-cwddx" Nov 21 19:03:21 crc kubenswrapper[4701]: W1121 19:03:21.330981 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcfa749f0_83ce_4ba6_8a3e_e43257bdb907.slice/crio-fd6f60fe6ccf02f23ec83c3533a9aec0daaaec0d093df1ec9db492dd7fc2689c WatchSource:0}: Error finding container fd6f60fe6ccf02f23ec83c3533a9aec0daaaec0d093df1ec9db492dd7fc2689c: Status 404 returned error can't find the container with id fd6f60fe6ccf02f23ec83c3533a9aec0daaaec0d093df1ec9db492dd7fc2689c Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.347959 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6f9560d6-8ab5-46f8-bd69-7bca42610547-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-9rlms\" (UID: \"6f9560d6-8ab5-46f8-bd69-7bca42610547\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9rlms" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.357105 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zq9bv\" (UniqueName: \"kubernetes.io/projected/67a65cf0-6dcb-4730-a43b-0be90f5c8a93-kube-api-access-zq9bv\") pod \"apiserver-7bbb656c7d-g7gbn\" (UID: \"67a65cf0-6dcb-4730-a43b-0be90f5c8a93\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-g7gbn" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.372907 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-hbp5b" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.377306 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hzkk6\" (UniqueName: \"kubernetes.io/projected/4f9fb2aa-8dfa-47d7-8e35-3b6267d85ef7-kube-api-access-hzkk6\") pod \"openshift-apiserver-operator-796bbdcf4f-nzzw5\" (UID: \"4f9fb2aa-8dfa-47d7-8e35-3b6267d85ef7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nzzw5" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.387577 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.399672 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b8kw2\" (UniqueName: \"kubernetes.io/projected/8b089186-0669-456a-bea2-60b001261161-kube-api-access-b8kw2\") pod \"authentication-operator-69f744f599-rfbjh\" (UID: \"8b089186-0669-456a-bea2-60b001261161\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rfbjh" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.411743 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-gzwfn"] Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.417182 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:21 crc kubenswrapper[4701]: E1121 19:03:21.417949 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:21.917936902 +0000 UTC m=+92.703076929 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.432783 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l9dz7\" (UniqueName: \"kubernetes.io/projected/1da8a1e4-422a-45dc-aa36-ae559ea1dc14-kube-api-access-l9dz7\") pod \"machine-approver-56656f9798-hcwlb\" (UID: \"1da8a1e4-422a-45dc-aa36-ae559ea1dc14\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hcwlb" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.437860 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/601c0380-cc9d-4363-94be-92be6aeb94ac-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-4kqhw\" (UID: \"601c0380-cc9d-4363-94be-92be6aeb94ac\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4kqhw" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.460926 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fm5vh\" (UniqueName: \"kubernetes.io/projected/f741f928-61fd-41d5-b8c8-879a4744fa2e-kube-api-access-fm5vh\") pod \"machine-api-operator-5694c8668f-z6z69\" (UID: \"f741f928-61fd-41d5-b8c8-879a4744fa2e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-z6z69" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.479848 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tvd4w\" (UniqueName: \"kubernetes.io/projected/1c076709-6bae-4b64-9169-2aed68c813cd-kube-api-access-tvd4w\") pod \"packageserver-d55dfcdfc-7wcwn\" (UID: \"1c076709-6bae-4b64-9169-2aed68c813cd\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7wcwn" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.496432 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-z6z69" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.496938 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-2b8wr" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.498105 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9rlms" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.514598 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-rfbjh" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.515816 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nzzw5" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.516684 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-zsvsv"] Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.518987 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.519322 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ssdwm\" (UniqueName: \"kubernetes.io/projected/57fc3016-4534-4be7-a281-b353f13830b9-kube-api-access-ssdwm\") pod \"service-ca-operator-777779d784-jbq9k\" (UID: \"57fc3016-4534-4be7-a281-b353f13830b9\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jbq9k" Nov 21 19:03:21 crc kubenswrapper[4701]: E1121 19:03:21.519530 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:22.01951315 +0000 UTC m=+92.804653177 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.522980 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gswgz\" (UniqueName: \"kubernetes.io/projected/56606974-5260-4587-b1cd-17e7ad12868b-kube-api-access-gswgz\") pod \"machine-config-controller-84d6567774-76rvr\" (UID: \"56606974-5260-4587-b1cd-17e7ad12868b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-76rvr" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.540699 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-w4fcr" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.541920 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-24j9g\" (UniqueName: \"kubernetes.io/projected/a59af599-0d79-4301-8c37-e0e7189477ad-kube-api-access-24j9g\") pod \"cluster-samples-operator-665b6dd947-7fs2f\" (UID: \"a59af599-0d79-4301-8c37-e0e7189477ad\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7fs2f" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.547453 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hcwlb" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.554650 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4kqhw" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.556702 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7pbvv\" (UniqueName: \"kubernetes.io/projected/aaed9de5-8fa2-4493-ab19-2a79c17c6241-kube-api-access-7pbvv\") pod \"openshift-controller-manager-operator-756b6f6bc6-v5bsv\" (UID: \"aaed9de5-8fa2-4493-ab19-2a79c17c6241\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-v5bsv" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.575718 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7wcwn" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.576537 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kjvng\" (UniqueName: \"kubernetes.io/projected/4c931253-2864-49ef-a35a-0e7c04e2d75e-kube-api-access-kjvng\") pod \"migrator-59844c95c7-zt9ht\" (UID: \"4c931253-2864-49ef-a35a-0e7c04e2d75e\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-zt9ht" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.594594 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l7bpg\" (UniqueName: \"kubernetes.io/projected/92358a71-dd66-49c9-8cc2-83cf555207d4-kube-api-access-l7bpg\") pod \"olm-operator-6b444d44fb-mqbkq\" (UID: \"92358a71-dd66-49c9-8cc2-83cf555207d4\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mqbkq" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.598006 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-zmch7" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.608147 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mqbkq" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.615774 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7fs2f" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.617886 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4l2xf\" (UniqueName: \"kubernetes.io/projected/4167d110-2211-4862-af3d-b6b4a88a0bfd-kube-api-access-4l2xf\") pod \"route-controller-manager-6576b87f9c-75d7z\" (UID: \"4167d110-2211-4862-af3d-b6b4a88a0bfd\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-75d7z" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.621029 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:21 crc kubenswrapper[4701]: E1121 19:03:21.621340 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:22.121327282 +0000 UTC m=+92.906467309 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.637300 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-76rvr" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.639859 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6klzr\" (UniqueName: \"kubernetes.io/projected/4fb5ab32-dc0b-40c0-8af0-7cffba411a22-kube-api-access-6klzr\") pod \"ingress-canary-2hzx9\" (UID: \"4fb5ab32-dc0b-40c0-8af0-7cffba411a22\") " pod="openshift-ingress-canary/ingress-canary-2hzx9" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.641752 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-jbq9k" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.663132 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-g7gbn" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.665672 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-75d7z" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.681576 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zsvsv" event={"ID":"8a478166-ab28-4186-a9c2-f079c8b2f2d2","Type":"ContainerStarted","Data":"bf8f2a012be78ce66171abc7c92cf1e08a97612c9218e2bef54fd9eb6e17c2a4"} Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.682016 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-v5bsv" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.693844 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-7zd2w"] Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.737852 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:21 crc kubenswrapper[4701]: E1121 19:03:21.738233 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:22.238202644 +0000 UTC m=+93.023342671 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.757951 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rb5wl\" (UniqueName: \"kubernetes.io/projected/4a518559-4b60-4e05-b0e1-7b2ef4b30817-kube-api-access-rb5wl\") pod \"dns-default-9d774\" (UID: \"4a518559-4b60-4e05-b0e1-7b2ef4b30817\") " pod="openshift-dns/dns-default-9d774" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.759254 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s9t62" event={"ID":"dd5d82de-78d8-4e8e-93f3-fe6ede598dce","Type":"ContainerStarted","Data":"eb09ae87454486ad524982de3a738423c8f15587a8166bdf2eb71e83e3b3a314"} Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.759741 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-gdj2w"] Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.759891 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.759938 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.766805 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v4m2t\" (UniqueName: \"kubernetes.io/projected/b6dceb79-b806-4504-bc30-70497679c75d-kube-api-access-v4m2t\") pod \"csi-hostpathplugin-f2jgv\" (UID: \"b6dceb79-b806-4504-bc30-70497679c75d\") " pod="hostpath-provisioner/csi-hostpathplugin-f2jgv" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.767815 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rffw2\" (UniqueName: \"kubernetes.io/projected/bbb8093d-f6ea-471b-a855-f7c6d5412f7b-kube-api-access-rffw2\") pod \"machine-config-server-wlgv5\" (UID: \"bbb8093d-f6ea-471b-a855-f7c6d5412f7b\") " pod="openshift-machine-config-operator/machine-config-server-wlgv5" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.785671 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zfhdw" event={"ID":"7ae80b08-d9c6-48e0-8fee-ce6cbff3b7e3","Type":"ContainerStarted","Data":"2a7d11cc3e71b41780e512301162176af162afdd012f70aab2ff8356a794f877"} Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.785716 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zfhdw" event={"ID":"7ae80b08-d9c6-48e0-8fee-ce6cbff3b7e3","Type":"ContainerStarted","Data":"72ef89c43592e61d0f8687af20b4b417da73c138fbab5db60509a3842666aa96"} Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.785726 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zfhdw" 
event={"ID":"7ae80b08-d9c6-48e0-8fee-ce6cbff3b7e3","Type":"ContainerStarted","Data":"1810c3a3a4018ae0e6b15068f6ddbeb137c314727ee47fb204db692b5684045d"} Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.788239 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-zwplq" event={"ID":"bfe14e44-05d7-460b-b4aa-462d435e8c62","Type":"ContainerStarted","Data":"671f763a736131d03c728e00b7aead25810d684edd0f32ce74daf23c590a7f90"} Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.788269 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-zwplq" event={"ID":"bfe14e44-05d7-460b-b4aa-462d435e8c62","Type":"ContainerStarted","Data":"f76a1613c72302b7bd4643958ac1aa6f113d2699de565a24088e42907a7da0a5"} Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.806710 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-n6dgf" event={"ID":"ca30ea3d-b3d5-478c-a154-284e721664d7","Type":"ContainerStarted","Data":"3dfc1e724bf88d75fe8bdd829eeefc164e6a3344ed89e1e9385287cc8167d3e8"} Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.807070 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-n6dgf" event={"ID":"ca30ea3d-b3d5-478c-a154-284e721664d7","Type":"ContainerStarted","Data":"ed3e777bb23ad795ec33ef8542ced0ec4545cd008572a5a3b84881153f68b271"} Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.829609 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pz8rd" event={"ID":"12c6bbad-70a1-41c6-a818-d9ec535873e3","Type":"ContainerStarted","Data":"7faadb984516a39d8b8968d64ca9335fd0d0f434d175200f890dd7012cb689bf"} Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.832878 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-gzwfn" event={"ID":"9d183ad2-4424-442f-a662-3572267b54fd","Type":"ContainerStarted","Data":"4e0293ed3903a88157a6ee06b784e1d6c044758f2efd5ab20ce4ce46f5ebcf84"} Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.840067 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:21 crc kubenswrapper[4701]: E1121 19:03:21.840440 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:22.340413306 +0000 UTC m=+93.125553333 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.848524 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-scrql" event={"ID":"cfa749f0-83ce-4ba6-8a3e-e43257bdb907","Type":"ContainerStarted","Data":"fd6f60fe6ccf02f23ec83c3533a9aec0daaaec0d093df1ec9db492dd7fc2689c"} Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.857429 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-hsngh" event={"ID":"d04e099e-931d-4fe3-9d85-196a1d40ebd8","Type":"ContainerStarted","Data":"ce4aa05e9df5f6f91e32d8cc5427ea7508d48a6827261bc22973d2c06a65f174"} Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.857464 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-hsngh" event={"ID":"d04e099e-931d-4fe3-9d85-196a1d40ebd8","Type":"ContainerStarted","Data":"ad2eca92b43c738527037f853c8d92df387abe7c40466c157a23a4af64c137a2"} Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.858962 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l7w9b"] Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.870363 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-zt9ht" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.870806 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-hsngh" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.871780 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-9d774" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.874182 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2hzx9" Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.887850 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-f2jgv" Nov 21 19:03:21 crc kubenswrapper[4701]: W1121 19:03:21.897533 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4b8139c8_66be_4f40_a084_aa26d58554bb.slice/crio-5094b19c6891b6b5b4d7d8e4870ec929956b7a7bad86e95e2aaf66c698002301 WatchSource:0}: Error finding container 5094b19c6891b6b5b4d7d8e4870ec929956b7a7bad86e95e2aaf66c698002301: Status 404 returned error can't find the container with id 5094b19c6891b6b5b4d7d8e4870ec929956b7a7bad86e95e2aaf66c698002301 Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.897810 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-wlgv5" Nov 21 19:03:21 crc kubenswrapper[4701]: W1121 19:03:21.910407 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6469e01b_cfc6_4ec9_87de_29c6eeee136f.slice/crio-ab62d59ee05d782218afad222e14bbdbe31d41ab31c38f4bff804cb85897a0c7 WatchSource:0}: Error finding container ab62d59ee05d782218afad222e14bbdbe31d41ab31c38f4bff804cb85897a0c7: Status 404 returned error can't find the container with id ab62d59ee05d782218afad222e14bbdbe31d41ab31c38f4bff804cb85897a0c7 Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.953436 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:21 crc kubenswrapper[4701]: E1121 19:03:21.954725 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:22.454708231 +0000 UTC m=+93.239848258 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:21 crc kubenswrapper[4701]: I1121 19:03:21.955494 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:21 crc kubenswrapper[4701]: E1121 19:03:21.957962 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:22.457945092 +0000 UTC m=+93.243085119 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:22 crc kubenswrapper[4701]: I1121 19:03:22.013886 4701 patch_prober.go:28] interesting pod/router-default-5444994796-hsngh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 21 19:03:22 crc kubenswrapper[4701]: [-]has-synced failed: reason withheld Nov 21 19:03:22 crc kubenswrapper[4701]: [+]process-running ok Nov 21 19:03:22 crc kubenswrapper[4701]: healthz check failed Nov 21 19:03:22 crc kubenswrapper[4701]: I1121 19:03:22.013987 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-hsngh" podUID="d04e099e-931d-4fe3-9d85-196a1d40ebd8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 21 19:03:22 crc kubenswrapper[4701]: I1121 19:03:22.065926 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:22 crc kubenswrapper[4701]: E1121 19:03:22.066283 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:22.566267106 +0000 UTC m=+93.351407123 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:22 crc kubenswrapper[4701]: I1121 19:03:22.151179 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-v6w6b"] Nov 21 19:03:22 crc kubenswrapper[4701]: I1121 19:03:22.167151 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:22 crc kubenswrapper[4701]: E1121 19:03:22.167547 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:22.667532777 +0000 UTC m=+93.452672804 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:22 crc kubenswrapper[4701]: I1121 19:03:22.272861 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:22 crc kubenswrapper[4701]: E1121 19:03:22.273264 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:22.773248756 +0000 UTC m=+93.558388783 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:22 crc kubenswrapper[4701]: I1121 19:03:22.283060 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zfhdw" podStartSLOduration=67.2830422 podStartE2EDuration="1m7.2830422s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:22.282593779 +0000 UTC m=+93.067733806" watchObservedRunningTime="2025-11-21 19:03:22.2830422 +0000 UTC m=+93.068182227" Nov 21 19:03:22 crc kubenswrapper[4701]: I1121 19:03:22.377548 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:22 crc kubenswrapper[4701]: E1121 19:03:22.378318 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:22.87829928 +0000 UTC m=+93.663439307 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:22 crc kubenswrapper[4701]: I1121 19:03:22.485048 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:22 crc kubenswrapper[4701]: E1121 19:03:22.485454 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:22.985425078 +0000 UTC m=+93.770565095 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:22 crc kubenswrapper[4701]: I1121 19:03:22.485504 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:22 crc kubenswrapper[4701]: E1121 19:03:22.486616 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:22.985919449 +0000 UTC m=+93.771059476 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:22 crc kubenswrapper[4701]: I1121 19:03:22.510676 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-hsngh" podStartSLOduration=67.510660539 podStartE2EDuration="1m7.510660539s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:22.509554004 +0000 UTC m=+93.294694031" watchObservedRunningTime="2025-11-21 19:03:22.510660539 +0000 UTC m=+93.295800556" Nov 21 19:03:22 crc kubenswrapper[4701]: I1121 19:03:22.585935 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:22 crc kubenswrapper[4701]: E1121 19:03:22.586335 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:23.086321981 +0000 UTC m=+93.871462008 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:22 crc kubenswrapper[4701]: I1121 19:03:22.687882 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:22 crc kubenswrapper[4701]: E1121 19:03:22.688561 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:23.188549132 +0000 UTC m=+93.973689159 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:22 crc kubenswrapper[4701]: I1121 19:03:22.718708 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pz8rd" podStartSLOduration=67.718691581 podStartE2EDuration="1m7.718691581s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:22.717657728 +0000 UTC m=+93.502797755" watchObservedRunningTime="2025-11-21 19:03:22.718691581 +0000 UTC m=+93.503831608" Nov 21 19:03:22 crc kubenswrapper[4701]: I1121 19:03:22.794839 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:22 crc kubenswrapper[4701]: E1121 19:03:22.795430 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:23.295400435 +0000 UTC m=+94.080540482 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:22 crc kubenswrapper[4701]: I1121 19:03:22.902925 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:22 crc kubenswrapper[4701]: E1121 19:03:22.903516 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:23.403500035 +0000 UTC m=+94.188640062 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:22 crc kubenswrapper[4701]: I1121 19:03:22.920768 4701 patch_prober.go:28] interesting pod/router-default-5444994796-hsngh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 21 19:03:22 crc kubenswrapper[4701]: [-]has-synced failed: reason withheld Nov 21 19:03:22 crc kubenswrapper[4701]: [+]process-running ok Nov 21 19:03:22 crc kubenswrapper[4701]: healthz check failed Nov 21 19:03:22 crc kubenswrapper[4701]: I1121 19:03:22.920812 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-hsngh" podUID="d04e099e-931d-4fe3-9d85-196a1d40ebd8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 21 19:03:22 crc kubenswrapper[4701]: I1121 19:03:22.922166 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" podStartSLOduration=67.922133712 podStartE2EDuration="1m7.922133712s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:22.890829419 +0000 UTC m=+93.675969446" watchObservedRunningTime="2025-11-21 19:03:22.922133712 +0000 UTC m=+93.707273739" Nov 21 19:03:22 crc kubenswrapper[4701]: I1121 19:03:22.926371 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s9t62" event={"ID":"dd5d82de-78d8-4e8e-93f3-fe6ede598dce","Type":"ContainerStarted","Data":"77102e1e03cd89d10ca1443d1aaffd56ff7e75bc8bb62345359604d305faaad7"} Nov 21 19:03:22 crc kubenswrapper[4701]: I1121 19:03:22.927126 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s9t62" Nov 21 19:03:22 crc kubenswrapper[4701]: I1121 19:03:22.950343 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-7zd2w" event={"ID":"6f35a6f5-6bdc-44cc-9314-8d0f90e1edfc","Type":"ContainerStarted","Data":"1f0bda638b495c094cf4e42b19b32c85bbd04417ee22aee1aebf21c6277b17f0"} Nov 21 19:03:22 crc kubenswrapper[4701]: I1121 19:03:22.950382 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-7zd2w" event={"ID":"6f35a6f5-6bdc-44cc-9314-8d0f90e1edfc","Type":"ContainerStarted","Data":"e1349ee5b94caa1f18c23e25c235bd53ae7ae03e822941179faaaf461d445ba4"} Nov 21 19:03:22 crc kubenswrapper[4701]: I1121 19:03:22.950729 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-7zd2w" Nov 21 19:03:22 crc kubenswrapper[4701]: I1121 19:03:22.962892 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s9t62" Nov 21 19:03:22 crc kubenswrapper[4701]: I1121 
19:03:22.970626 4701 patch_prober.go:28] interesting pod/console-operator-58897d9998-7zd2w container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/readyz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body= Nov 21 19:03:22 crc kubenswrapper[4701]: I1121 19:03:22.970679 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-7zd2w" podUID="6f35a6f5-6bdc-44cc-9314-8d0f90e1edfc" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.8:8443/readyz\": dial tcp 10.217.0.8:8443: connect: connection refused" Nov 21 19:03:22 crc kubenswrapper[4701]: I1121 19:03:22.971978 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-gzwfn" event={"ID":"9d183ad2-4424-442f-a662-3572267b54fd","Type":"ContainerStarted","Data":"08b3c5ebd13022a2fc4e547ab6c4bf787a432651f4efee1f6403b735f1f76dac"} Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.000591 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-scrql" event={"ID":"cfa749f0-83ce-4ba6-8a3e-e43257bdb907","Type":"ContainerStarted","Data":"42276190a1415e08649d75337ad0f8b56fdeaf40c247ea1b67000721adbe14ac"} Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.000640 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-scrql" event={"ID":"cfa749f0-83ce-4ba6-8a3e-e43257bdb907","Type":"ContainerStarted","Data":"2d43f36eaa4cea6f140c2f7b44f7ab41fac1d90f9877e0bcf1f621027e4c9f47"} Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.003801 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:23 crc kubenswrapper[4701]: E1121 19:03:23.004097 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:23.504079991 +0000 UTC m=+94.289220018 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.048444 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pz8rd" event={"ID":"12c6bbad-70a1-41c6-a818-d9ec535873e3","Type":"ContainerStarted","Data":"76dd779652c537ccadc08f937c8b29bc147db0160c344d796b7e6bc24dd033b1"} Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.085699 4701 generic.go:334] "Generic (PLEG): container finished" podID="8a478166-ab28-4186-a9c2-f079c8b2f2d2" containerID="5fe4878dc6299d5df1267b6530341375b2bb524399454fa2e1232d3bed0d9a30" exitCode=0 Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.085771 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zsvsv" event={"ID":"8a478166-ab28-4186-a9c2-f079c8b2f2d2","Type":"ContainerDied","Data":"5fe4878dc6299d5df1267b6530341375b2bb524399454fa2e1232d3bed0d9a30"} Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.106787 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:23 crc kubenswrapper[4701]: E1121 19:03:23.108694 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:23.608682155 +0000 UTC m=+94.393822182 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.115595 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-gdj2w" event={"ID":"4b8139c8-66be-4f40-a084-aa26d58554bb","Type":"ContainerStarted","Data":"3b06c27be1e9c7873e2125a6f907c1d7aab513a28b4bffb8268ae416e3fbfd52"} Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.115640 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-gdj2w" event={"ID":"4b8139c8-66be-4f40-a084-aa26d58554bb","Type":"ContainerStarted","Data":"5094b19c6891b6b5b4d7d8e4870ec929956b7a7bad86e95e2aaf66c698002301"} Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.116450 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-gdj2w" Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.135565 4701 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-gdj2w container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body= Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.135621 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-gdj2w" podUID="4b8139c8-66be-4f40-a084-aa26d58554bb" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.146378 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-wlgv5" event={"ID":"bbb8093d-f6ea-471b-a855-f7c6d5412f7b","Type":"ContainerStarted","Data":"d5cc813f77f1a4e9047b740a8a37602677c59bfae657bf42a6d59ee38b3c10c5"} Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.146452 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-wlgv5" event={"ID":"bbb8093d-f6ea-471b-a855-f7c6d5412f7b","Type":"ContainerStarted","Data":"803cdafca83ea26645006883add36dc43dd4b5a9f94a6a70072a8f72f83e25b5"} Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.150674 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hcwlb" event={"ID":"1da8a1e4-422a-45dc-aa36-ae559ea1dc14","Type":"ContainerStarted","Data":"bd52db6356fb76e0a0212a890c8a3c11a19349fb87eed9b8f59cc5cd5bc589a7"} Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.150734 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hcwlb" event={"ID":"1da8a1e4-422a-45dc-aa36-ae559ea1dc14","Type":"ContainerStarted","Data":"71c881bd081507a50660d066682a7ea17df51100960c7ea0e7659d2dc387b427"} Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.153052 4701 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l7w9b" event={"ID":"6469e01b-cfc6-4ec9-87de-29c6eeee136f","Type":"ContainerStarted","Data":"bf3b7d6d35f5a959e5a7387c5350074f50255b8c1508336fcae8d3d62dab3a07"} Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.153094 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l7w9b" event={"ID":"6469e01b-cfc6-4ec9-87de-29c6eeee136f","Type":"ContainerStarted","Data":"ab62d59ee05d782218afad222e14bbdbe31d41ab31c38f4bff804cb85897a0c7"} Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.155859 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-v6w6b" event={"ID":"b3e75990-afff-41bb-a78e-3d04223bbb6c","Type":"ContainerStarted","Data":"ff4839db168ea0caffb9a0155f7fe2524782b1a4b509d272a4b7c2bf650159c7"} Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.155929 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-v6w6b" event={"ID":"b3e75990-afff-41bb-a78e-3d04223bbb6c","Type":"ContainerStarted","Data":"7b635e6c22b41b7ca93a8f9cb7c7782c18c5b31dc31619e1fd1a0c9a6398af02"} Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.160168 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-v6w6b" Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.200835 4701 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-v6w6b container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.27:8080/healthz\": dial tcp 10.217.0.27:8080: connect: connection refused" start-of-body= Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.201161 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-v6w6b" podUID="b3e75990-afff-41bb-a78e-3d04223bbb6c" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.27:8080/healthz\": dial tcp 10.217.0.27:8080: connect: connection refused" Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.207521 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:23 crc kubenswrapper[4701]: E1121 19:03:23.208821 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:23.708800491 +0000 UTC m=+94.493940528 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.249726 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-z6z69"] Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.258952 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9rlms"] Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.318752 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:23 crc kubenswrapper[4701]: E1121 19:03:23.320689 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:23.820676203 +0000 UTC m=+94.605816230 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.341523 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-twqqt"] Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.363682 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395860-72qpb"] Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.412611 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-n6dgf" podStartSLOduration=68.41257183 podStartE2EDuration="1m8.41257183s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:23.40525611 +0000 UTC m=+94.190396137" watchObservedRunningTime="2025-11-21 19:03:23.41257183 +0000 UTC m=+94.197711857" Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.421899 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 
19:03:23 crc kubenswrapper[4701]: E1121 19:03:23.422310 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:23.922295072 +0000 UTC m=+94.707435099 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.445794 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-76rvr"] Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.455724 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-twmdp"] Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.478864 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-hbp5b"] Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.493287 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-zmch7"] Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.495281 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-cwddx"] Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.507238 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-rfbjh"] Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.511443 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-w4fcr"] Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.515685 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-2b8wr"] Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.521702 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-zwplq" podStartSLOduration=68.521685212 podStartE2EDuration="1m8.521685212s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:23.508244998 +0000 UTC m=+94.293385025" watchObservedRunningTime="2025-11-21 19:03:23.521685212 +0000 UTC m=+94.306825239" Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.534795 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:23 crc kubenswrapper[4701]: E1121 19:03:23.535324 4701 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:24.03530975 +0000 UTC m=+94.820449777 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.641297 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.641545 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4kqhw"] Nov 21 19:03:23 crc kubenswrapper[4701]: E1121 19:03:23.641747 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:24.141730893 +0000 UTC m=+94.926870920 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.641995 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:23 crc kubenswrapper[4701]: E1121 19:03:23.642556 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:24.142546601 +0000 UTC m=+94.927686628 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.650180 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mqbkq"] Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.689060 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l7w9b" podStartSLOduration=68.689032485 podStartE2EDuration="1m8.689032485s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:23.662390214 +0000 UTC m=+94.447530241" watchObservedRunningTime="2025-11-21 19:03:23.689032485 +0000 UTC m=+94.474172512" Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.693809 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7wcwn"] Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.705944 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-75d7z"] Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.719791 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-gdj2w" podStartSLOduration=68.719763116 podStartE2EDuration="1m8.719763116s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:23.717732502 +0000 UTC m=+94.502872529" watchObservedRunningTime="2025-11-21 19:03:23.719763116 +0000 UTC m=+94.504903143" Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.747589 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-wlgv5" podStartSLOduration=5.7475609930000005 podStartE2EDuration="5.747560993s" podCreationTimestamp="2025-11-21 19:03:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:23.743561476 +0000 UTC m=+94.528701503" watchObservedRunningTime="2025-11-21 19:03:23.747560993 +0000 UTC m=+94.532701020" Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.748861 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:23 crc kubenswrapper[4701]: E1121 19:03:23.749064 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-21 19:03:24.249041836 +0000 UTC m=+95.034181863 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.749424 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:23 crc kubenswrapper[4701]: E1121 19:03:23.753303 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:24.253268367 +0000 UTC m=+95.038408394 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.772916 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-jbq9k"] Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.781668 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-g7gbn"] Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.784035 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-v6w6b" podStartSLOduration=68.784014519 podStartE2EDuration="1m8.784014519s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:23.762542611 +0000 UTC m=+94.547682638" watchObservedRunningTime="2025-11-21 19:03:23.784014519 +0000 UTC m=+94.569154546" Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.802931 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nzzw5"] Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.816753 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-zt9ht"] Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.817086 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s9t62" podStartSLOduration=68.81706683 podStartE2EDuration="1m8.81706683s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 
+0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:23.815598169 +0000 UTC m=+94.600738196" watchObservedRunningTime="2025-11-21 19:03:23.81706683 +0000 UTC m=+94.602206847" Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.850755 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:23 crc kubenswrapper[4701]: E1121 19:03:23.851092 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:24.351076463 +0000 UTC m=+95.136216490 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.856534 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-scrql" podStartSLOduration=68.856512282 podStartE2EDuration="1m8.856512282s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:23.855440359 +0000 UTC m=+94.640580386" watchObservedRunningTime="2025-11-21 19:03:23.856512282 +0000 UTC m=+94.641652309" Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.863787 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-v5bsv"] Nov 21 19:03:23 crc kubenswrapper[4701]: W1121 19:03:23.874146 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod57fc3016_4534_4be7_a281_b353f13830b9.slice/crio-46e4b09a21810e01dc67d314e180b9b21cd490c2168a7a1db70141bbb820665e WatchSource:0}: Error finding container 46e4b09a21810e01dc67d314e180b9b21cd490c2168a7a1db70141bbb820665e: Status 404 returned error can't find the container with id 46e4b09a21810e01dc67d314e180b9b21cd490c2168a7a1db70141bbb820665e Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.896142 4701 patch_prober.go:28] interesting pod/router-default-5444994796-hsngh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 21 19:03:23 crc kubenswrapper[4701]: [-]has-synced failed: reason withheld Nov 21 19:03:23 crc kubenswrapper[4701]: [+]process-running ok Nov 21 19:03:23 crc kubenswrapper[4701]: healthz check failed Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.896539 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-hsngh" podUID="d04e099e-931d-4fe3-9d85-196a1d40ebd8" 
containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.920352 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.921597 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.953905 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:23 crc kubenswrapper[4701]: E1121 19:03:23.954564 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:24.454529802 +0000 UTC m=+95.239669829 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.970317 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-7zd2w" podStartSLOduration=68.970296526 podStartE2EDuration="1m8.970296526s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:23.897026067 +0000 UTC m=+94.682166094" watchObservedRunningTime="2025-11-21 19:03:23.970296526 +0000 UTC m=+94.755436553" Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.989643 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-gzwfn" podStartSLOduration=68.989620708 podStartE2EDuration="1m8.989620708s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:23.966737109 +0000 UTC m=+94.751877136" watchObservedRunningTime="2025-11-21 19:03:23.989620708 +0000 UTC m=+94.774760745" Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.993160 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7fs2f"] Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.993366 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:23 crc kubenswrapper[4701]: I1121 19:03:23.993391 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-2hzx9"] Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.023452 4701 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-f2jgv"] Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.025696 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-9d774"] Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.035590 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-nkl92"] Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.038614 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-nkl92" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.046484 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.048183 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-nkl92"] Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.059532 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:24 crc kubenswrapper[4701]: E1121 19:03:24.060114 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:24.560074846 +0000 UTC m=+95.345215033 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.060286 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb02901b-e5a6-4059-b49c-9011bfb481c9-catalog-content\") pod \"certified-operators-nkl92\" (UID: \"cb02901b-e5a6-4059-b49c-9011bfb481c9\") " pod="openshift-marketplace/certified-operators-nkl92" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.060359 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb02901b-e5a6-4059-b49c-9011bfb481c9-utilities\") pod \"certified-operators-nkl92\" (UID: \"cb02901b-e5a6-4059-b49c-9011bfb481c9\") " pod="openshift-marketplace/certified-operators-nkl92" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.060398 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-76lkf\" (UniqueName: \"kubernetes.io/projected/cb02901b-e5a6-4059-b49c-9011bfb481c9-kube-api-access-76lkf\") pod \"certified-operators-nkl92\" (UID: \"cb02901b-e5a6-4059-b49c-9011bfb481c9\") " pod="openshift-marketplace/certified-operators-nkl92" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.060438 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:24 crc kubenswrapper[4701]: E1121 19:03:24.062814 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:24.562792436 +0000 UTC m=+95.347932463 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:24 crc kubenswrapper[4701]: W1121 19:03:24.154732 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4a518559_4b60_4e05_b0e1_7b2ef4b30817.slice/crio-15626d3ec9381f12d5faab3983daad392cb29ccdde7db450e2b0e9ee5bc73312 WatchSource:0}: Error finding container 15626d3ec9381f12d5faab3983daad392cb29ccdde7db450e2b0e9ee5bc73312: Status 404 returned error can't find the container with id 15626d3ec9381f12d5faab3983daad392cb29ccdde7db450e2b0e9ee5bc73312 Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.163627 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:24 crc kubenswrapper[4701]: E1121 19:03:24.163771 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:24.663740529 +0000 UTC m=+95.448880556 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.168834 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb02901b-e5a6-4059-b49c-9011bfb481c9-catalog-content\") pod \"certified-operators-nkl92\" (UID: \"cb02901b-e5a6-4059-b49c-9011bfb481c9\") " pod="openshift-marketplace/certified-operators-nkl92" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.168895 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb02901b-e5a6-4059-b49c-9011bfb481c9-utilities\") pod \"certified-operators-nkl92\" (UID: \"cb02901b-e5a6-4059-b49c-9011bfb481c9\") " pod="openshift-marketplace/certified-operators-nkl92" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.168928 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-76lkf\" (UniqueName: \"kubernetes.io/projected/cb02901b-e5a6-4059-b49c-9011bfb481c9-kube-api-access-76lkf\") pod \"certified-operators-nkl92\" (UID: \"cb02901b-e5a6-4059-b49c-9011bfb481c9\") " pod="openshift-marketplace/certified-operators-nkl92" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.169009 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:24 crc kubenswrapper[4701]: E1121 19:03:24.169546 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:24.669521666 +0000 UTC m=+95.454661693 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.169577 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb02901b-e5a6-4059-b49c-9011bfb481c9-catalog-content\") pod \"certified-operators-nkl92\" (UID: \"cb02901b-e5a6-4059-b49c-9011bfb481c9\") " pod="openshift-marketplace/certified-operators-nkl92" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.169884 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb02901b-e5a6-4059-b49c-9011bfb481c9-utilities\") pod \"certified-operators-nkl92\" (UID: \"cb02901b-e5a6-4059-b49c-9011bfb481c9\") " pod="openshift-marketplace/certified-operators-nkl92" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.203542 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-76lkf\" (UniqueName: \"kubernetes.io/projected/cb02901b-e5a6-4059-b49c-9011bfb481c9-kube-api-access-76lkf\") pod \"certified-operators-nkl92\" (UID: \"cb02901b-e5a6-4059-b49c-9011bfb481c9\") " pod="openshift-marketplace/certified-operators-nkl92" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.203747 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zsvsv" event={"ID":"8a478166-ab28-4186-a9c2-f079c8b2f2d2","Type":"ContainerStarted","Data":"0a8ddadd8ebeb37fa813beaf74853e2448baaad469188147ccef67cdaa21aa2a"} Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.204413 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zsvsv" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.207098 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-75d7z" event={"ID":"4167d110-2211-4862-af3d-b6b4a88a0bfd","Type":"ContainerStarted","Data":"f27f6da2a8d5df7efa9f0b995359cca551c540a1413af003af00ba0242284abc"} Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.228825 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7wcwn" event={"ID":"1c076709-6bae-4b64-9169-2aed68c813cd","Type":"ContainerStarted","Data":"b1ac22bd60657c3b43a1ec48a1697c7bd1a68477974863979148a4320273723d"} Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.232974 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-z6z69" event={"ID":"f741f928-61fd-41d5-b8c8-879a4744fa2e","Type":"ContainerStarted","Data":"9c837a2881e1af687684962acf5bf64d741f0864d65cfda97a33bbd0808e08ac"} Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.233016 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-z6z69" event={"ID":"f741f928-61fd-41d5-b8c8-879a4744fa2e","Type":"ContainerStarted","Data":"95dfc6b857366cffae8e708e3f3e536afd9ea954e54f5f5debf46ceb2206085e"} Nov 21 
19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.233937 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-r9glm"] Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.234952 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-r9glm" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.236336 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zsvsv" podStartSLOduration=69.236324734 podStartE2EDuration="1m9.236324734s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:24.225580149 +0000 UTC m=+95.010720176" watchObservedRunningTime="2025-11-21 19:03:24.236324734 +0000 UTC m=+95.021464761" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.236604 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" event={"ID":"a2f5b911-dc9c-4009-a4b0-da201a34f156","Type":"ContainerStarted","Data":"134ce6681c8b82bb4948b14c900dadacc393dbabf38ee53e550957af05842067"} Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.237924 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.241645 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-r9glm"] Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.242983 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-2b8wr" event={"ID":"3dc5a393-ee87-4a1c-b786-9523a05de343","Type":"ContainerStarted","Data":"806ac31642299e7ef21d694fc2b9ff506c1e13cf90a228ad7b636ec8daaa6252"} Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.246212 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4kqhw" event={"ID":"601c0380-cc9d-4363-94be-92be6aeb94ac","Type":"ContainerStarted","Data":"0ddeb272d896505b34b331fd8b07d66b5c84e113da515f99b379d096e49b9044"} Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.248055 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-rfbjh" event={"ID":"8b089186-0669-456a-bea2-60b001261161","Type":"ContainerStarted","Data":"ad3e7e3359c60a708e6921c1be21fb098dda4e37b397f802ea72290f11cfd82c"} Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.249855 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-9d774" event={"ID":"4a518559-4b60-4e05-b0e1-7b2ef4b30817","Type":"ContainerStarted","Data":"15626d3ec9381f12d5faab3983daad392cb29ccdde7db450e2b0e9ee5bc73312"} Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.252367 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-cwddx" event={"ID":"f7def574-9941-4933-83df-3f20df5797d4","Type":"ContainerStarted","Data":"2153e73e7611e6c8cc808341bb0fa415e25bc859ed2941398dbf12779cac1cd3"} Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.262682 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-jbq9k" 
event={"ID":"57fc3016-4534-4be7-a281-b353f13830b9","Type":"ContainerStarted","Data":"46e4b09a21810e01dc67d314e180b9b21cd490c2168a7a1db70141bbb820665e"} Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.265382 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-zmch7" event={"ID":"2ea15e3b-bc7e-491a-8cd7-4275d33abf23","Type":"ContainerStarted","Data":"2f718a96a67ffaf038526ab1e3704dcd09f1c9c4cf9ac3154e749b939f0e88fb"} Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.265412 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-zmch7" event={"ID":"2ea15e3b-bc7e-491a-8cd7-4275d33abf23","Type":"ContainerStarted","Data":"a8ac570d087f9e1992bc667db301efe3b5dc16dd9fd37d920303275f6d06d9a7"} Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.267261 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9rlms" event={"ID":"6f9560d6-8ab5-46f8-bd69-7bca42610547","Type":"ContainerStarted","Data":"909bca7fc0d7d1d9cb9789eb9e61a84bda9737f921981d326bda9e9f9a3f0f32"} Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.267627 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9rlms" event={"ID":"6f9560d6-8ab5-46f8-bd69-7bca42610547","Type":"ContainerStarted","Data":"822249d98d6f81afea9cb394b87a9a82610ee56f710ff072269e9dca46345c3a"} Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.268860 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nzzw5" event={"ID":"4f9fb2aa-8dfa-47d7-8e35-3b6267d85ef7","Type":"ContainerStarted","Data":"1605d44415fdd8f36a1c18f662159cad2813ca8814a2749b2524e427c3205fa4"} Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.269682 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.269877 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dde062d0-393b-4b35-80ec-c3f67c2a5129-utilities\") pod \"community-operators-r9glm\" (UID: \"dde062d0-393b-4b35-80ec-c3f67c2a5129\") " pod="openshift-marketplace/community-operators-r9glm" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.269909 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9dlbd\" (UniqueName: \"kubernetes.io/projected/dde062d0-393b-4b35-80ec-c3f67c2a5129-kube-api-access-9dlbd\") pod \"community-operators-r9glm\" (UID: \"dde062d0-393b-4b35-80ec-c3f67c2a5129\") " pod="openshift-marketplace/community-operators-r9glm" Nov 21 19:03:24 crc kubenswrapper[4701]: E1121 19:03:24.270007 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:24.769971669 +0000 UTC m=+95.555111706 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.270172 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.270488 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dde062d0-393b-4b35-80ec-c3f67c2a5129-catalog-content\") pod \"community-operators-r9glm\" (UID: \"dde062d0-393b-4b35-80ec-c3f67c2a5129\") " pod="openshift-marketplace/community-operators-r9glm" Nov 21 19:03:24 crc kubenswrapper[4701]: E1121 19:03:24.271137 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:24.771120253 +0000 UTC m=+95.556260280 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.298170 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hcwlb" event={"ID":"1da8a1e4-422a-45dc-aa36-ae559ea1dc14","Type":"ContainerStarted","Data":"07345a47556f4cfa3fc2fb5a4b3b248f8e50bce136496e90651ed935bc3f6684"} Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.323592 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395860-72qpb" event={"ID":"276e2cb3-e02e-4122-b10b-a454198b7954","Type":"ContainerStarted","Data":"43c55175c367845afbb71d4d61ac30931c3eaa6f41b08f77af904180bcecf56f"} Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.323644 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395860-72qpb" event={"ID":"276e2cb3-e02e-4122-b10b-a454198b7954","Type":"ContainerStarted","Data":"a34c88325ebff25c37ad7a03259d4ba53ce1021b21efcabe8fba6525e24164d4"} Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.330609 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mqbkq" event={"ID":"92358a71-dd66-49c9-8cc2-83cf555207d4","Type":"ContainerStarted","Data":"734d24871a4aeb97c4ed7630fc2de90118683624d1629dffa59214428a82a748"} Nov 21 19:03:24 crc kubenswrapper[4701]: 
I1121 19:03:24.334384 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-zmch7" podStartSLOduration=69.334365905 podStartE2EDuration="1m9.334365905s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:24.333101707 +0000 UTC m=+95.118241734" watchObservedRunningTime="2025-11-21 19:03:24.334365905 +0000 UTC m=+95.119505932" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.353540 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-76rvr" event={"ID":"56606974-5260-4587-b1cd-17e7ad12868b","Type":"ContainerStarted","Data":"928a4661d76e92783dd41516ec6aac4144450021d29b55ff38c31d00f06aeaed"} Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.353597 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-76rvr" event={"ID":"56606974-5260-4587-b1cd-17e7ad12868b","Type":"ContainerStarted","Data":"235616b8519b7fbc5599f31a61914b8e7b3a03c487d8954d17b8f65b8546b3e0"} Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.366958 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-w4fcr" event={"ID":"9c46cea4-80aa-4a56-9370-0b7f5331c1ee","Type":"ContainerStarted","Data":"be684810f8709f028c158620395dd7c442668f45794221a71d17ebd560650848"} Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.371636 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.371782 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dde062d0-393b-4b35-80ec-c3f67c2a5129-utilities\") pod \"community-operators-r9glm\" (UID: \"dde062d0-393b-4b35-80ec-c3f67c2a5129\") " pod="openshift-marketplace/community-operators-r9glm" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.371805 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9dlbd\" (UniqueName: \"kubernetes.io/projected/dde062d0-393b-4b35-80ec-c3f67c2a5129-kube-api-access-9dlbd\") pod \"community-operators-r9glm\" (UID: \"dde062d0-393b-4b35-80ec-c3f67c2a5129\") " pod="openshift-marketplace/community-operators-r9glm" Nov 21 19:03:24 crc kubenswrapper[4701]: E1121 19:03:24.372016 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:24.872001866 +0000 UTC m=+95.657141893 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.372590 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dde062d0-393b-4b35-80ec-c3f67c2a5129-utilities\") pod \"community-operators-r9glm\" (UID: \"dde062d0-393b-4b35-80ec-c3f67c2a5129\") " pod="openshift-marketplace/community-operators-r9glm" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.373011 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hcwlb" podStartSLOduration=69.373000328 podStartE2EDuration="1m9.373000328s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:24.354417882 +0000 UTC m=+95.139557909" watchObservedRunningTime="2025-11-21 19:03:24.373000328 +0000 UTC m=+95.158140355" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.374087 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dde062d0-393b-4b35-80ec-c3f67c2a5129-catalog-content\") pod \"community-operators-r9glm\" (UID: \"dde062d0-393b-4b35-80ec-c3f67c2a5129\") " pod="openshift-marketplace/community-operators-r9glm" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.374716 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dde062d0-393b-4b35-80ec-c3f67c2a5129-catalog-content\") pod \"community-operators-r9glm\" (UID: \"dde062d0-393b-4b35-80ec-c3f67c2a5129\") " pod="openshift-marketplace/community-operators-r9glm" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.376376 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-twqqt" event={"ID":"7e4a0b68-874d-4da2-9286-12b84e37e090","Type":"ContainerStarted","Data":"1239f76ca26748110a0a6c40a87e01fd16e6cd33b4fff810f93e485393c56ae1"} Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.376424 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-twqqt" event={"ID":"7e4a0b68-874d-4da2-9286-12b84e37e090","Type":"ContainerStarted","Data":"3efd7632d80afa6c2a0d8670a5a564dbbcf60398a8376ccff01fc84c04b8a487"} Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.376820 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9rlms" podStartSLOduration=69.376812081 podStartE2EDuration="1m9.376812081s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:24.372604629 +0000 UTC m=+95.157744646" watchObservedRunningTime="2025-11-21 19:03:24.376812081 +0000 UTC m=+95.161952108" Nov 21 19:03:24 
crc kubenswrapper[4701]: I1121 19:03:24.386136 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-zt9ht" event={"ID":"4c931253-2864-49ef-a35a-0e7c04e2d75e","Type":"ContainerStarted","Data":"e5791a8f2961068cbbecf5a73dcae18f450164787540fd70fe864fa061187089"} Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.396040 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-2hzx9" event={"ID":"4fb5ab32-dc0b-40c0-8af0-7cffba411a22","Type":"ContainerStarted","Data":"9d6342d913853ba281c563a6baa85a79799990f070ff17c4c97b8185f7a2c44f"} Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.398369 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-rfbjh" podStartSLOduration=69.398357402 podStartE2EDuration="1m9.398357402s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:24.395664793 +0000 UTC m=+95.180804820" watchObservedRunningTime="2025-11-21 19:03:24.398357402 +0000 UTC m=+95.183497429" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.401986 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-v5bsv" event={"ID":"aaed9de5-8fa2-4493-ab19-2a79c17c6241","Type":"ContainerStarted","Data":"f655da003564cfbb624d7bf193d252e744019ca03b03ba91afebeb57c9bae872"} Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.414684 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9dlbd\" (UniqueName: \"kubernetes.io/projected/dde062d0-393b-4b35-80ec-c3f67c2a5129-kube-api-access-9dlbd\") pod \"community-operators-r9glm\" (UID: \"dde062d0-393b-4b35-80ec-c3f67c2a5129\") " pod="openshift-marketplace/community-operators-r9glm" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.423908 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29395860-72qpb" podStartSLOduration=70.423890689 podStartE2EDuration="1m10.423890689s" podCreationTimestamp="2025-11-21 19:02:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:24.4230396 +0000 UTC m=+95.208179627" watchObservedRunningTime="2025-11-21 19:03:24.423890689 +0000 UTC m=+95.209030716" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.424113 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-xjpsq"] Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.425435 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-xjpsq" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.425473 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-hbp5b" event={"ID":"5d7aff3f-cf94-411b-b23b-c91f58cdc2f6","Type":"ContainerStarted","Data":"3d27f31e8d597e37388af64362591a3da28b468bc51ae6ea212de414cc6836f1"} Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.426302 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-hbp5b" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.433328 4701 patch_prober.go:28] interesting pod/downloads-7954f5f757-hbp5b container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.33:8080/\": dial tcp 10.217.0.33:8080: connect: connection refused" start-of-body= Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.433378 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-hbp5b" podUID="5d7aff3f-cf94-411b-b23b-c91f58cdc2f6" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.33:8080/\": dial tcp 10.217.0.33:8080: connect: connection refused" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.440834 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xjpsq"] Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.444002 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-g7gbn" event={"ID":"67a65cf0-6dcb-4730-a43b-0be90f5c8a93","Type":"ContainerStarted","Data":"d8e3e1adb5add18ed64aa580b58ee8eb8d7f40abda70dfc0c7eddfbf0dc9de39"} Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.445891 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-nkl92" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.448852 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-f2jgv" event={"ID":"b6dceb79-b806-4504-bc30-70497679c75d","Type":"ContainerStarted","Data":"273f312ebc36f55970c845e6d1dcf1eef174929799b169a496b8de88322be018"} Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.449843 4701 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-v6w6b container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.27:8080/healthz\": dial tcp 10.217.0.27:8080: connect: connection refused" start-of-body= Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.449892 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-v6w6b" podUID="b3e75990-afff-41bb-a78e-3d04223bbb6c" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.27:8080/healthz\": dial tcp 10.217.0.27:8080: connect: connection refused" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.467033 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-8gkzf" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.467282 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-gdj2w" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.475285 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-hbp5b" podStartSLOduration=69.475268671 podStartE2EDuration="1m9.475268671s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:24.472570682 +0000 UTC m=+95.257710719" watchObservedRunningTime="2025-11-21 19:03:24.475268671 +0000 UTC m=+95.260408698" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.476852 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b7dd16f-9a0f-406f-84b4-cc94baf5405c-utilities\") pod \"certified-operators-xjpsq\" (UID: \"4b7dd16f-9a0f-406f-84b4-cc94baf5405c\") " pod="openshift-marketplace/certified-operators-xjpsq" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.477172 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ddzdt\" (UniqueName: \"kubernetes.io/projected/4b7dd16f-9a0f-406f-84b4-cc94baf5405c-kube-api-access-ddzdt\") pod \"certified-operators-xjpsq\" (UID: \"4b7dd16f-9a0f-406f-84b4-cc94baf5405c\") " pod="openshift-marketplace/certified-operators-xjpsq" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.477350 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.477399 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b7dd16f-9a0f-406f-84b4-cc94baf5405c-catalog-content\") pod \"certified-operators-xjpsq\" (UID: \"4b7dd16f-9a0f-406f-84b4-cc94baf5405c\") " pod="openshift-marketplace/certified-operators-xjpsq" Nov 21 19:03:24 crc kubenswrapper[4701]: E1121 19:03:24.482787 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:24.982768595 +0000 UTC m=+95.767908692 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.578732 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.579112 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b7dd16f-9a0f-406f-84b4-cc94baf5405c-catalog-content\") pod \"certified-operators-xjpsq\" (UID: \"4b7dd16f-9a0f-406f-84b4-cc94baf5405c\") " pod="openshift-marketplace/certified-operators-xjpsq" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.579305 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b7dd16f-9a0f-406f-84b4-cc94baf5405c-utilities\") pod \"certified-operators-xjpsq\" (UID: \"4b7dd16f-9a0f-406f-84b4-cc94baf5405c\") " pod="openshift-marketplace/certified-operators-xjpsq" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.579395 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ddzdt\" (UniqueName: \"kubernetes.io/projected/4b7dd16f-9a0f-406f-84b4-cc94baf5405c-kube-api-access-ddzdt\") pod \"certified-operators-xjpsq\" (UID: \"4b7dd16f-9a0f-406f-84b4-cc94baf5405c\") " pod="openshift-marketplace/certified-operators-xjpsq" Nov 21 19:03:24 crc kubenswrapper[4701]: E1121 19:03:24.579809 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:25.079793362 +0000 UTC m=+95.864933389 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.580179 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b7dd16f-9a0f-406f-84b4-cc94baf5405c-catalog-content\") pod \"certified-operators-xjpsq\" (UID: \"4b7dd16f-9a0f-406f-84b4-cc94baf5405c\") " pod="openshift-marketplace/certified-operators-xjpsq" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.580494 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b7dd16f-9a0f-406f-84b4-cc94baf5405c-utilities\") pod \"certified-operators-xjpsq\" (UID: \"4b7dd16f-9a0f-406f-84b4-cc94baf5405c\") " pod="openshift-marketplace/certified-operators-xjpsq" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.625658 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ddzdt\" (UniqueName: \"kubernetes.io/projected/4b7dd16f-9a0f-406f-84b4-cc94baf5405c-kube-api-access-ddzdt\") pod \"certified-operators-xjpsq\" (UID: \"4b7dd16f-9a0f-406f-84b4-cc94baf5405c\") " pod="openshift-marketplace/certified-operators-xjpsq" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.656285 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-r9glm" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.664013 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-7xdws"] Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.665248 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-7xdws" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.680481 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/02015e4f-1f27-4004-8bc6-778e6db5fb94-utilities\") pod \"community-operators-7xdws\" (UID: \"02015e4f-1f27-4004-8bc6-778e6db5fb94\") " pod="openshift-marketplace/community-operators-7xdws" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.681002 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/02015e4f-1f27-4004-8bc6-778e6db5fb94-catalog-content\") pod \"community-operators-7xdws\" (UID: \"02015e4f-1f27-4004-8bc6-778e6db5fb94\") " pod="openshift-marketplace/community-operators-7xdws" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.681136 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.681258 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ppk7z\" (UniqueName: \"kubernetes.io/projected/02015e4f-1f27-4004-8bc6-778e6db5fb94-kube-api-access-ppk7z\") pod \"community-operators-7xdws\" (UID: \"02015e4f-1f27-4004-8bc6-778e6db5fb94\") " pod="openshift-marketplace/community-operators-7xdws" Nov 21 19:03:24 crc kubenswrapper[4701]: E1121 19:03:24.682770 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:25.18275585 +0000 UTC m=+95.967895877 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.736252 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7xdws"] Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.736508 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-7zd2w" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.759490 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-xjpsq" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.781893 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.782026 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/02015e4f-1f27-4004-8bc6-778e6db5fb94-utilities\") pod \"community-operators-7xdws\" (UID: \"02015e4f-1f27-4004-8bc6-778e6db5fb94\") " pod="openshift-marketplace/community-operators-7xdws" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.782085 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/02015e4f-1f27-4004-8bc6-778e6db5fb94-catalog-content\") pod \"community-operators-7xdws\" (UID: \"02015e4f-1f27-4004-8bc6-778e6db5fb94\") " pod="openshift-marketplace/community-operators-7xdws" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.782154 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ppk7z\" (UniqueName: \"kubernetes.io/projected/02015e4f-1f27-4004-8bc6-778e6db5fb94-kube-api-access-ppk7z\") pod \"community-operators-7xdws\" (UID: \"02015e4f-1f27-4004-8bc6-778e6db5fb94\") " pod="openshift-marketplace/community-operators-7xdws" Nov 21 19:03:24 crc kubenswrapper[4701]: E1121 19:03:24.782523 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:25.282510279 +0000 UTC m=+96.067650296 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.785541 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/02015e4f-1f27-4004-8bc6-778e6db5fb94-utilities\") pod \"community-operators-7xdws\" (UID: \"02015e4f-1f27-4004-8bc6-778e6db5fb94\") " pod="openshift-marketplace/community-operators-7xdws" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.785785 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/02015e4f-1f27-4004-8bc6-778e6db5fb94-catalog-content\") pod \"community-operators-7xdws\" (UID: \"02015e4f-1f27-4004-8bc6-778e6db5fb94\") " pod="openshift-marketplace/community-operators-7xdws" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.842996 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ppk7z\" (UniqueName: \"kubernetes.io/projected/02015e4f-1f27-4004-8bc6-778e6db5fb94-kube-api-access-ppk7z\") pod \"community-operators-7xdws\" (UID: \"02015e4f-1f27-4004-8bc6-778e6db5fb94\") " pod="openshift-marketplace/community-operators-7xdws" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.873890 4701 patch_prober.go:28] interesting pod/router-default-5444994796-hsngh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 21 19:03:24 crc kubenswrapper[4701]: [-]has-synced failed: reason withheld Nov 21 19:03:24 crc kubenswrapper[4701]: [+]process-running ok Nov 21 19:03:24 crc kubenswrapper[4701]: healthz check failed Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.873940 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-hsngh" podUID="d04e099e-931d-4fe3-9d85-196a1d40ebd8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.883490 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:24 crc kubenswrapper[4701]: E1121 19:03:24.883990 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:25.383978424 +0000 UTC m=+96.169118451 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.914967 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7xdws" Nov 21 19:03:24 crc kubenswrapper[4701]: I1121 19:03:24.985102 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:24 crc kubenswrapper[4701]: E1121 19:03:24.986024 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:25.485939289 +0000 UTC m=+96.271079316 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.088789 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:25 crc kubenswrapper[4701]: E1121 19:03:25.089830 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:25.589818488 +0000 UTC m=+96.374958515 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.189923 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:25 crc kubenswrapper[4701]: E1121 19:03:25.190098 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:25.690073286 +0000 UTC m=+96.475213313 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.190620 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:25 crc kubenswrapper[4701]: E1121 19:03:25.190948 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:25.690935806 +0000 UTC m=+96.476075833 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.291754 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:25 crc kubenswrapper[4701]: E1121 19:03:25.292025 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:25.792010212 +0000 UTC m=+96.577150239 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.393351 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:25 crc kubenswrapper[4701]: E1121 19:03:25.393868 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:25.893843365 +0000 UTC m=+96.678983392 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.487503 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-rfbjh" event={"ID":"8b089186-0669-456a-bea2-60b001261161","Type":"ContainerStarted","Data":"209ee46436363de0f071f3b1a7cd3abb8a4b40735c550c1c84a8377dd3cf675d"} Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.495208 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:25 crc kubenswrapper[4701]: E1121 19:03:25.496227 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:25.99619792 +0000 UTC m=+96.781337947 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.509445 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-w4fcr" event={"ID":"9c46cea4-80aa-4a56-9370-0b7f5331c1ee","Type":"ContainerStarted","Data":"af665c49d693ad89e4aeb8592d205b9918f6cd75b1cd34a909149dc1d7a0168e"} Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.509524 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-w4fcr" event={"ID":"9c46cea4-80aa-4a56-9370-0b7f5331c1ee","Type":"ContainerStarted","Data":"5985ec4ef484148032a188674aa5cd8e1ae3ae9ca477e092dc04285a684732b8"} Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.538953 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-w4fcr" podStartSLOduration=70.538922663 podStartE2EDuration="1m10.538922663s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:25.538537555 +0000 UTC m=+96.323677592" watchObservedRunningTime="2025-11-21 19:03:25.538922663 +0000 UTC m=+96.324062690" Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.593812 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-cwddx" 
event={"ID":"f7def574-9941-4933-83df-3f20df5797d4","Type":"ContainerStarted","Data":"b19c5cafe29e1768e2d6df2eee994afe070120f77fc2b5571bfab081b8ba9d51"} Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.596425 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:25 crc kubenswrapper[4701]: E1121 19:03:25.608958 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:26.108935271 +0000 UTC m=+96.894075488 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.618449 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-jbq9k" event={"ID":"57fc3016-4534-4be7-a281-b353f13830b9","Type":"ContainerStarted","Data":"e56568b2e5b603bb7a9a04f17cfb72813acf790d2eb5f5b31c373f790fbfcf16"} Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.630782 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-75d7z" event={"ID":"4167d110-2211-4862-af3d-b6b4a88a0bfd","Type":"ContainerStarted","Data":"7c213ac2c74f637d0acca04440145ae3e7f1ff8c6d8d87f1b60f05941b0b3e02"} Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.631990 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-75d7z" Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.647817 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-cwddx" podStartSLOduration=70.64780007 podStartE2EDuration="1m10.64780007s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:25.645097771 +0000 UTC m=+96.430237798" watchObservedRunningTime="2025-11-21 19:03:25.64780007 +0000 UTC m=+96.432940097" Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.651908 4701 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-75d7z container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.19:8443/healthz\": dial tcp 10.217.0.19:8443: connect: connection refused" start-of-body= Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.651941 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-75d7z" podUID="4167d110-2211-4862-af3d-b6b4a88a0bfd" 
containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.19:8443/healthz\": dial tcp 10.217.0.19:8443: connect: connection refused" Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.656807 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-z6z69" event={"ID":"f741f928-61fd-41d5-b8c8-879a4744fa2e","Type":"ContainerStarted","Data":"1736b2185eef9ad1edb0726d86273fe7214345ad7a08c16f8b748c52fca07ff2"} Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.679063 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4kqhw" event={"ID":"601c0380-cc9d-4363-94be-92be6aeb94ac","Type":"ContainerStarted","Data":"4cfa5f2cd96d884dcc96c048ee19ac8afcd98a07a0ea35f46e2dbed10acbe589"} Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.697392 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:25 crc kubenswrapper[4701]: E1121 19:03:25.698722 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:26.198707141 +0000 UTC m=+96.983847168 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.719596 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-v5bsv" event={"ID":"aaed9de5-8fa2-4493-ab19-2a79c17c6241","Type":"ContainerStarted","Data":"b154d73765a230d753e7734eb3c48b31753c40fd459c4f821d5dfbf9b590b946"} Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.743448 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-jbq9k" podStartSLOduration=70.743428178 podStartE2EDuration="1m10.743428178s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:25.741534986 +0000 UTC m=+96.526675013" watchObservedRunningTime="2025-11-21 19:03:25.743428178 +0000 UTC m=+96.528568205" Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.744200 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-75d7z" podStartSLOduration=70.744191514 podStartE2EDuration="1m10.744191514s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 
19:03:25.679295417 +0000 UTC m=+96.464435444" watchObservedRunningTime="2025-11-21 19:03:25.744191514 +0000 UTC m=+96.529331541" Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.768482 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-zt9ht" event={"ID":"4c931253-2864-49ef-a35a-0e7c04e2d75e","Type":"ContainerStarted","Data":"541499d5958780067dfd70d9186cb93c7c7ce1259e2b953f63d014cbae0ee9ee"} Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.770323 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-v5bsv" podStartSLOduration=70.770299154 podStartE2EDuration="1m10.770299154s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:25.768838072 +0000 UTC m=+96.553978099" watchObservedRunningTime="2025-11-21 19:03:25.770299154 +0000 UTC m=+96.555439181" Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.791448 4701 generic.go:334] "Generic (PLEG): container finished" podID="67a65cf0-6dcb-4730-a43b-0be90f5c8a93" containerID="9cd76d69758657f9e92015b227dd786729414a1406a4e6762f146ea3fbf20630" exitCode=0 Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.791543 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-g7gbn" event={"ID":"67a65cf0-6dcb-4730-a43b-0be90f5c8a93","Type":"ContainerDied","Data":"9cd76d69758657f9e92015b227dd786729414a1406a4e6762f146ea3fbf20630"} Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.799290 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:25 crc kubenswrapper[4701]: E1121 19:03:25.804151 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:26.304134343 +0000 UTC m=+97.089274370 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.815733 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-z6z69" podStartSLOduration=70.815694656 podStartE2EDuration="1m10.815694656s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:25.79619105 +0000 UTC m=+96.581331087" watchObservedRunningTime="2025-11-21 19:03:25.815694656 +0000 UTC m=+96.600834683" Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.846076 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4kqhw" podStartSLOduration=70.846049077 podStartE2EDuration="1m10.846049077s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:25.833849111 +0000 UTC m=+96.618989138" watchObservedRunningTime="2025-11-21 19:03:25.846049077 +0000 UTC m=+96.631189104" Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.854452 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mqbkq" event={"ID":"92358a71-dd66-49c9-8cc2-83cf555207d4","Type":"ContainerStarted","Data":"c1478dda96aff86c4ab099b781fc040e96ab9985c399c5e4755faea36b4ccfdd"} Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.854587 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mqbkq" Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.870455 4701 patch_prober.go:28] interesting pod/router-default-5444994796-hsngh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 21 19:03:25 crc kubenswrapper[4701]: [-]has-synced failed: reason withheld Nov 21 19:03:25 crc kubenswrapper[4701]: [+]process-running ok Nov 21 19:03:25 crc kubenswrapper[4701]: healthz check failed Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.870518 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-hsngh" podUID="d04e099e-931d-4fe3-9d85-196a1d40ebd8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.879246 4701 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-mqbkq container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.25:8443/healthz\": dial tcp 10.217.0.25:8443: connect: connection refused" start-of-body= Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.879330 4701 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mqbkq" podUID="92358a71-dd66-49c9-8cc2-83cf555207d4" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.25:8443/healthz\": dial tcp 10.217.0.25:8443: connect: connection refused" Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.888906 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xjpsq"] Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.930140 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.931910 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-twqqt" event={"ID":"7e4a0b68-874d-4da2-9286-12b84e37e090","Type":"ContainerStarted","Data":"5ba8fc100193a386f7e649349b3a6d127d5768186fe50fed100c9fba22da9f68"} Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.931940 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-twqqt" Nov 21 19:03:25 crc kubenswrapper[4701]: I1121 19:03:25.943520 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-zt9ht" podStartSLOduration=70.943502135 podStartE2EDuration="1m10.943502135s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:25.93227153 +0000 UTC m=+96.717411557" watchObservedRunningTime="2025-11-21 19:03:25.943502135 +0000 UTC m=+96.728642162" Nov 21 19:03:25 crc kubenswrapper[4701]: E1121 19:03:25.952006 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:26.45198209 +0000 UTC m=+97.237122117 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.010962 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-twqqt" podStartSLOduration=71.010831235 podStartE2EDuration="1m11.010831235s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:26.009733802 +0000 UTC m=+96.794873829" watchObservedRunningTime="2025-11-21 19:03:26.010831235 +0000 UTC m=+96.795971262" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.022451 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nzzw5" event={"ID":"4f9fb2aa-8dfa-47d7-8e35-3b6267d85ef7","Type":"ContainerStarted","Data":"625e613bae090456c56bcbdbbd726aa6586879492ca36ab914c380cce9fea5b1"} Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.023506 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mqbkq" podStartSLOduration=71.023482792 podStartE2EDuration="1m11.023482792s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:25.974841749 +0000 UTC m=+96.759981766" watchObservedRunningTime="2025-11-21 19:03:26.023482792 +0000 UTC m=+96.808622819" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.033149 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:26 crc kubenswrapper[4701]: E1121 19:03:26.035571 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:26.535554745 +0000 UTC m=+97.320694772 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.077238 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-hbp5b" event={"ID":"5d7aff3f-cf94-411b-b23b-c91f58cdc2f6","Type":"ContainerStarted","Data":"b61b369a5425bd561ff7080fe46620efc5f93f49b5cf2c3754b836ff3b95c7e0"} Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.088770 4701 patch_prober.go:28] interesting pod/downloads-7954f5f757-hbp5b container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.33:8080/\": dial tcp 10.217.0.33:8080: connect: connection refused" start-of-body= Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.088851 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-hbp5b" podUID="5d7aff3f-cf94-411b-b23b-c91f58cdc2f6" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.33:8080/\": dial tcp 10.217.0.33:8080: connect: connection refused" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.132371 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7wcwn" event={"ID":"1c076709-6bae-4b64-9169-2aed68c813cd","Type":"ContainerStarted","Data":"1bf6d3bdd87a8c3cd3d9e24114b9f38af66feaec38320d628ea692b7a0bb8f1d"} Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.134898 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7wcwn" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.135561 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:26 crc kubenswrapper[4701]: E1121 19:03:26.135823 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:26.635794494 +0000 UTC m=+97.420934521 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.136893 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:26 crc kubenswrapper[4701]: E1121 19:03:26.137532 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:26.637524892 +0000 UTC m=+97.422664919 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.146396 4701 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-7wcwn container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.14:5443/healthz\": dial tcp 10.217.0.14:5443: connect: connection refused" start-of-body= Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.146468 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7wcwn" podUID="1c076709-6bae-4b64-9169-2aed68c813cd" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.14:5443/healthz\": dial tcp 10.217.0.14:5443: connect: connection refused" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.151707 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nzzw5" podStartSLOduration=71.151690771 podStartE2EDuration="1m11.151690771s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:26.078468832 +0000 UTC m=+96.863608859" watchObservedRunningTime="2025-11-21 19:03:26.151690771 +0000 UTC m=+96.936830798" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.153190 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7xdws"] Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.161279 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-nkl92"] Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.186510 4701 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7wcwn" podStartSLOduration=71.18647652 podStartE2EDuration="1m11.18647652s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:26.164308836 +0000 UTC m=+96.949448863" watchObservedRunningTime="2025-11-21 19:03:26.18647652 +0000 UTC m=+96.971616547" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.190812 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" event={"ID":"a2f5b911-dc9c-4009-a4b0-da201a34f156","Type":"ContainerStarted","Data":"7c2c493ff54092ed7869db47f255b23f0ac1377ef540066b81c93355a0bd42d9"} Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.191776 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.201247 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-76rvr" event={"ID":"56606974-5260-4587-b1cd-17e7ad12868b","Type":"ContainerStarted","Data":"281e8d78011ab802258dde0c40bc53ade45b9e7a0813188cc1bc92b1d0efd7c2"} Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.220695 4701 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-twmdp container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.37:6443/healthz\": dial tcp 10.217.0.37:6443: connect: connection refused" start-of-body= Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.220752 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" podUID="a2f5b911-dc9c-4009-a4b0-da201a34f156" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.37:6443/healthz\": dial tcp 10.217.0.37:6443: connect: connection refused" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.232331 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-rcsbh"] Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.233439 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rcsbh" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.237642 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:26 crc kubenswrapper[4701]: E1121 19:03:26.238698 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:26.73868412 +0000 UTC m=+97.523824137 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.247850 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.249680 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-2b8wr" event={"ID":"3dc5a393-ee87-4a1c-b786-9523a05de343","Type":"ContainerStarted","Data":"ac764ece84ff51a52a18a5d9e60412b60b3e116590b840948814c820d1739f63"} Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.264442 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" podStartSLOduration=71.264419512 podStartE2EDuration="1m11.264419512s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:26.247480192 +0000 UTC m=+97.032620219" watchObservedRunningTime="2025-11-21 19:03:26.264419512 +0000 UTC m=+97.049559529" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.296166 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rcsbh"] Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.296335 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7fs2f" event={"ID":"a59af599-0d79-4301-8c37-e0e7189477ad","Type":"ContainerStarted","Data":"a352a637556d91ae9d5bd204d37c7d9144bdc20ead0d597d9c81f59aa76566aa"} Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.296356 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7fs2f" event={"ID":"a59af599-0d79-4301-8c37-e0e7189477ad","Type":"ContainerStarted","Data":"7774eda68694d51101e5236b383274dac1d0c685c2a3e81b7d97cb73dc0eb483"} Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.349405 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/795df5e4-162f-49a8-8316-2307c03a3f2d-utilities\") pod \"redhat-marketplace-rcsbh\" (UID: \"795df5e4-162f-49a8-8316-2307c03a3f2d\") " pod="openshift-marketplace/redhat-marketplace-rcsbh" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.349573 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/795df5e4-162f-49a8-8316-2307c03a3f2d-catalog-content\") pod \"redhat-marketplace-rcsbh\" (UID: \"795df5e4-162f-49a8-8316-2307c03a3f2d\") " pod="openshift-marketplace/redhat-marketplace-rcsbh" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.349681 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-slgtp\" (UniqueName: \"kubernetes.io/projected/795df5e4-162f-49a8-8316-2307c03a3f2d-kube-api-access-slgtp\") pod 
\"redhat-marketplace-rcsbh\" (UID: \"795df5e4-162f-49a8-8316-2307c03a3f2d\") " pod="openshift-marketplace/redhat-marketplace-rcsbh" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.349720 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:26 crc kubenswrapper[4701]: E1121 19:03:26.350671 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:26.850655585 +0000 UTC m=+97.635795612 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.374302 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-2hzx9" event={"ID":"4fb5ab32-dc0b-40c0-8af0-7cffba411a22","Type":"ContainerStarted","Data":"3446b313557b01ecc1ddee055673ece23d7a42c02b1a72ff027a54838a8f1f0f"} Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.376267 4701 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-v6w6b container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.27:8080/healthz\": dial tcp 10.217.0.27:8080: connect: connection refused" start-of-body= Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.376304 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-v6w6b" podUID="b3e75990-afff-41bb-a78e-3d04223bbb6c" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.27:8080/healthz\": dial tcp 10.217.0.27:8080: connect: connection refused" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.381955 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-r9glm"] Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.421037 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-76rvr" podStartSLOduration=71.421021481 podStartE2EDuration="1m11.421021481s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:26.412178218 +0000 UTC m=+97.197318245" watchObservedRunningTime="2025-11-21 19:03:26.421021481 +0000 UTC m=+97.206161508" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.469927 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zsvsv" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.470277 4701 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:26 crc kubenswrapper[4701]: E1121 19:03:26.470905 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:26.97087838 +0000 UTC m=+97.756018407 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.472589 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/795df5e4-162f-49a8-8316-2307c03a3f2d-utilities\") pod \"redhat-marketplace-rcsbh\" (UID: \"795df5e4-162f-49a8-8316-2307c03a3f2d\") " pod="openshift-marketplace/redhat-marketplace-rcsbh" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.472879 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/795df5e4-162f-49a8-8316-2307c03a3f2d-catalog-content\") pod \"redhat-marketplace-rcsbh\" (UID: \"795df5e4-162f-49a8-8316-2307c03a3f2d\") " pod="openshift-marketplace/redhat-marketplace-rcsbh" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.473258 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-slgtp\" (UniqueName: \"kubernetes.io/projected/795df5e4-162f-49a8-8316-2307c03a3f2d-kube-api-access-slgtp\") pod \"redhat-marketplace-rcsbh\" (UID: \"795df5e4-162f-49a8-8316-2307c03a3f2d\") " pod="openshift-marketplace/redhat-marketplace-rcsbh" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.473341 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.487194 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/795df5e4-162f-49a8-8316-2307c03a3f2d-catalog-content\") pod \"redhat-marketplace-rcsbh\" (UID: \"795df5e4-162f-49a8-8316-2307c03a3f2d\") " pod="openshift-marketplace/redhat-marketplace-rcsbh" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.490182 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/795df5e4-162f-49a8-8316-2307c03a3f2d-utilities\") pod \"redhat-marketplace-rcsbh\" (UID: \"795df5e4-162f-49a8-8316-2307c03a3f2d\") " 
pod="openshift-marketplace/redhat-marketplace-rcsbh" Nov 21 19:03:26 crc kubenswrapper[4701]: E1121 19:03:26.498564 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:26.998538453 +0000 UTC m=+97.783678480 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.535673 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7fs2f" podStartSLOduration=71.535638344 podStartE2EDuration="1m11.535638344s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:26.526539834 +0000 UTC m=+97.311679861" watchObservedRunningTime="2025-11-21 19:03:26.535638344 +0000 UTC m=+97.320778371" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.541230 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-slgtp\" (UniqueName: \"kubernetes.io/projected/795df5e4-162f-49a8-8316-2307c03a3f2d-kube-api-access-slgtp\") pod \"redhat-marketplace-rcsbh\" (UID: \"795df5e4-162f-49a8-8316-2307c03a3f2d\") " pod="openshift-marketplace/redhat-marketplace-rcsbh" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.580285 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rcsbh" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.590504 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:26 crc kubenswrapper[4701]: E1121 19:03:26.599577 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:27.099525518 +0000 UTC m=+97.884665545 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.609555 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-2b8wr" podStartSLOduration=71.609523796 podStartE2EDuration="1m11.609523796s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:26.575660307 +0000 UTC m=+97.360800334" watchObservedRunningTime="2025-11-21 19:03:26.609523796 +0000 UTC m=+97.394663823" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.647064 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-2hzx9" podStartSLOduration=8.647019844999999 podStartE2EDuration="8.647019845s" podCreationTimestamp="2025-11-21 19:03:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:26.614427314 +0000 UTC m=+97.399567341" watchObservedRunningTime="2025-11-21 19:03:26.647019845 +0000 UTC m=+97.432159872" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.670116 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-h6bck"] Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.682743 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-h6bck" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.707744 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/53624f72-bfb0-4be5-b00a-f06de73ae1f0-catalog-content\") pod \"redhat-marketplace-h6bck\" (UID: \"53624f72-bfb0-4be5-b00a-f06de73ae1f0\") " pod="openshift-marketplace/redhat-marketplace-h6bck" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.707806 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/53624f72-bfb0-4be5-b00a-f06de73ae1f0-utilities\") pod \"redhat-marketplace-h6bck\" (UID: \"53624f72-bfb0-4be5-b00a-f06de73ae1f0\") " pod="openshift-marketplace/redhat-marketplace-h6bck" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.707836 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xsddx\" (UniqueName: \"kubernetes.io/projected/53624f72-bfb0-4be5-b00a-f06de73ae1f0-kube-api-access-xsddx\") pod \"redhat-marketplace-h6bck\" (UID: \"53624f72-bfb0-4be5-b00a-f06de73ae1f0\") " pod="openshift-marketplace/redhat-marketplace-h6bck" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.707858 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:26 crc kubenswrapper[4701]: E1121 19:03:26.708150 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:27.208138049 +0000 UTC m=+97.993278076 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.713501 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-h6bck"] Nov 21 19:03:26 crc kubenswrapper[4701]: E1121 19:03:26.815404 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:27.31538133 +0000 UTC m=+98.100521357 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.820221 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.820705 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/53624f72-bfb0-4be5-b00a-f06de73ae1f0-catalog-content\") pod \"redhat-marketplace-h6bck\" (UID: \"53624f72-bfb0-4be5-b00a-f06de73ae1f0\") " pod="openshift-marketplace/redhat-marketplace-h6bck" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.820850 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/53624f72-bfb0-4be5-b00a-f06de73ae1f0-utilities\") pod \"redhat-marketplace-h6bck\" (UID: \"53624f72-bfb0-4be5-b00a-f06de73ae1f0\") " pod="openshift-marketplace/redhat-marketplace-h6bck" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.820905 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xsddx\" (UniqueName: \"kubernetes.io/projected/53624f72-bfb0-4be5-b00a-f06de73ae1f0-kube-api-access-xsddx\") pod \"redhat-marketplace-h6bck\" (UID: \"53624f72-bfb0-4be5-b00a-f06de73ae1f0\") " pod="openshift-marketplace/redhat-marketplace-h6bck" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.820933 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:26 crc kubenswrapper[4701]: E1121 19:03:26.821363 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:27.321351491 +0000 UTC m=+98.106491518 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.821706 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/53624f72-bfb0-4be5-b00a-f06de73ae1f0-catalog-content\") pod \"redhat-marketplace-h6bck\" (UID: \"53624f72-bfb0-4be5-b00a-f06de73ae1f0\") " pod="openshift-marketplace/redhat-marketplace-h6bck" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.821915 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/53624f72-bfb0-4be5-b00a-f06de73ae1f0-utilities\") pod \"redhat-marketplace-h6bck\" (UID: \"53624f72-bfb0-4be5-b00a-f06de73ae1f0\") " pod="openshift-marketplace/redhat-marketplace-h6bck" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.871483 4701 patch_prober.go:28] interesting pod/router-default-5444994796-hsngh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 21 19:03:26 crc kubenswrapper[4701]: [-]has-synced failed: reason withheld Nov 21 19:03:26 crc kubenswrapper[4701]: [+]process-running ok Nov 21 19:03:26 crc kubenswrapper[4701]: healthz check failed Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.871565 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-hsngh" podUID="d04e099e-931d-4fe3-9d85-196a1d40ebd8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.893106 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xsddx\" (UniqueName: \"kubernetes.io/projected/53624f72-bfb0-4be5-b00a-f06de73ae1f0-kube-api-access-xsddx\") pod \"redhat-marketplace-h6bck\" (UID: \"53624f72-bfb0-4be5-b00a-f06de73ae1f0\") " pod="openshift-marketplace/redhat-marketplace-h6bck" Nov 21 19:03:26 crc kubenswrapper[4701]: I1121 19:03:26.926679 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:26 crc kubenswrapper[4701]: E1121 19:03:26.927410 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:27.427381125 +0000 UTC m=+98.212521152 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.028298 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:27 crc kubenswrapper[4701]: E1121 19:03:27.028693 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:27.528675328 +0000 UTC m=+98.313815355 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.119561 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-h6bck" Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.130828 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:27 crc kubenswrapper[4701]: E1121 19:03:27.131300 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:27.631285877 +0000 UTC m=+98.416425904 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.233048 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:27 crc kubenswrapper[4701]: E1121 19:03:27.233476 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:27.733463479 +0000 UTC m=+98.518603506 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.237453 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-w6nbm"] Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.238465 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-w6nbm" Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.254915 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.270184 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-w6nbm"] Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.334215 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:27 crc kubenswrapper[4701]: E1121 19:03:27.334354 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:27.834322681 +0000 UTC m=+98.619462708 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.334734 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90bb3a29-464a-4967-a009-d4fa6b92de73-catalog-content\") pod \"redhat-operators-w6nbm\" (UID: \"90bb3a29-464a-4967-a009-d4fa6b92de73\") " pod="openshift-marketplace/redhat-operators-w6nbm" Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.334790 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fh648\" (UniqueName: \"kubernetes.io/projected/90bb3a29-464a-4967-a009-d4fa6b92de73-kube-api-access-fh648\") pod \"redhat-operators-w6nbm\" (UID: \"90bb3a29-464a-4967-a009-d4fa6b92de73\") " pod="openshift-marketplace/redhat-operators-w6nbm" Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.335025 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.335150 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90bb3a29-464a-4967-a009-d4fa6b92de73-utilities\") pod \"redhat-operators-w6nbm\" (UID: \"90bb3a29-464a-4967-a009-d4fa6b92de73\") " pod="openshift-marketplace/redhat-operators-w6nbm" Nov 21 19:03:27 crc kubenswrapper[4701]: E1121 19:03:27.335467 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:27.835456625 +0000 UTC m=+98.620596652 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.422322 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-zt9ht" event={"ID":"4c931253-2864-49ef-a35a-0e7c04e2d75e","Type":"ContainerStarted","Data":"c2797d7155f1d54118ea0a6f47a71b91d524aa50866ada02b65b6eb3d1ac5f30"} Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.435988 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.436540 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90bb3a29-464a-4967-a009-d4fa6b92de73-utilities\") pod \"redhat-operators-w6nbm\" (UID: \"90bb3a29-464a-4967-a009-d4fa6b92de73\") " pod="openshift-marketplace/redhat-operators-w6nbm" Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.436587 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90bb3a29-464a-4967-a009-d4fa6b92de73-catalog-content\") pod \"redhat-operators-w6nbm\" (UID: \"90bb3a29-464a-4967-a009-d4fa6b92de73\") " pod="openshift-marketplace/redhat-operators-w6nbm" Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.436613 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fh648\" (UniqueName: \"kubernetes.io/projected/90bb3a29-464a-4967-a009-d4fa6b92de73-kube-api-access-fh648\") pod \"redhat-operators-w6nbm\" (UID: \"90bb3a29-464a-4967-a009-d4fa6b92de73\") " pod="openshift-marketplace/redhat-operators-w6nbm" Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.437797 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90bb3a29-464a-4967-a009-d4fa6b92de73-utilities\") pod \"redhat-operators-w6nbm\" (UID: \"90bb3a29-464a-4967-a009-d4fa6b92de73\") " pod="openshift-marketplace/redhat-operators-w6nbm" Nov 21 19:03:27 crc kubenswrapper[4701]: E1121 19:03:27.437890 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:27.937872631 +0000 UTC m=+98.723012658 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.438072 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90bb3a29-464a-4967-a009-d4fa6b92de73-catalog-content\") pod \"redhat-operators-w6nbm\" (UID: \"90bb3a29-464a-4967-a009-d4fa6b92de73\") " pod="openshift-marketplace/redhat-operators-w6nbm" Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.454740 4701 generic.go:334] "Generic (PLEG): container finished" podID="02015e4f-1f27-4004-8bc6-778e6db5fb94" containerID="db11f2fe6a88b2a53c4448925d4c6aecdec09a93b14e8a41786dfbf12b884684" exitCode=0 Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.454820 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7xdws" event={"ID":"02015e4f-1f27-4004-8bc6-778e6db5fb94","Type":"ContainerDied","Data":"db11f2fe6a88b2a53c4448925d4c6aecdec09a93b14e8a41786dfbf12b884684"} Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.454848 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7xdws" event={"ID":"02015e4f-1f27-4004-8bc6-778e6db5fb94","Type":"ContainerStarted","Data":"75b888e6efa5045f31611006d4690008362bb37ca541e3c2b01ad1726894bb6d"} Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.462665 4701 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.476890 4701 generic.go:334] "Generic (PLEG): container finished" podID="dde062d0-393b-4b35-80ec-c3f67c2a5129" containerID="a008b5c6dae2c78356f388638ab94280f58c81eb9ac32a90db7c9432dfebba2d" exitCode=0 Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.477195 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r9glm" event={"ID":"dde062d0-393b-4b35-80ec-c3f67c2a5129","Type":"ContainerDied","Data":"a008b5c6dae2c78356f388638ab94280f58c81eb9ac32a90db7c9432dfebba2d"} Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.477253 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r9glm" event={"ID":"dde062d0-393b-4b35-80ec-c3f67c2a5129","Type":"ContainerStarted","Data":"f9f409d94f6458181b5001b9cd9a5b1f664ebf69bfbe98b35903f7c81632f72c"} Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.478106 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fh648\" (UniqueName: \"kubernetes.io/projected/90bb3a29-464a-4967-a009-d4fa6b92de73-kube-api-access-fh648\") pod \"redhat-operators-w6nbm\" (UID: \"90bb3a29-464a-4967-a009-d4fa6b92de73\") " pod="openshift-marketplace/redhat-operators-w6nbm" Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.492557 4701 generic.go:334] "Generic (PLEG): container finished" podID="4b7dd16f-9a0f-406f-84b4-cc94baf5405c" containerID="0a052ec5790244fa183eec3679f81c5c3d55a9049d96ef99f670800f267fbfeb" exitCode=0 Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.492863 4701 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-marketplace/certified-operators-xjpsq" event={"ID":"4b7dd16f-9a0f-406f-84b4-cc94baf5405c","Type":"ContainerDied","Data":"0a052ec5790244fa183eec3679f81c5c3d55a9049d96ef99f670800f267fbfeb"} Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.492920 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xjpsq" event={"ID":"4b7dd16f-9a0f-406f-84b4-cc94baf5405c","Type":"ContainerStarted","Data":"07bce4eb0850245b385a494d55391adfd9b954e0a1d1227506fabd7851769060"} Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.505031 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rcsbh"] Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.542584 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:27 crc kubenswrapper[4701]: E1121 19:03:27.544595 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:28.044575311 +0000 UTC m=+98.829715338 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.548420 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-g7gbn" event={"ID":"67a65cf0-6dcb-4730-a43b-0be90f5c8a93","Type":"ContainerStarted","Data":"641b78ee800cf6360a022187379510b7f5bc1a8f82d9e3d6ee8c7eb3253b1ea2"} Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.586601 4701 generic.go:334] "Generic (PLEG): container finished" podID="cb02901b-e5a6-4059-b49c-9011bfb481c9" containerID="5dd0b85bdc63f3422ee1dd7700fbb96388fe91dc231674765ed87a7d87b2aaae" exitCode=0 Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.586745 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nkl92" event={"ID":"cb02901b-e5a6-4059-b49c-9011bfb481c9","Type":"ContainerDied","Data":"5dd0b85bdc63f3422ee1dd7700fbb96388fe91dc231674765ed87a7d87b2aaae"} Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.586781 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nkl92" event={"ID":"cb02901b-e5a6-4059-b49c-9011bfb481c9","Type":"ContainerStarted","Data":"45ad0c783eb40d6e26a8e6fd156d57d7e9bce257208d4435c67ca9e774188ee2"} Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.592897 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-w6nbm" Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.639434 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-f2jgv" event={"ID":"b6dceb79-b806-4504-bc30-70497679c75d","Type":"ContainerStarted","Data":"a4c375371b8049043ac8cfc64f9354f3db5a567e27a1028b5bc244b64469eafd"} Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.645129 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:27 crc kubenswrapper[4701]: E1121 19:03:27.646404 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:28.146379023 +0000 UTC m=+98.931519040 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.653685 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-hc62d"] Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.654919 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-hc62d" Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.671079 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-2b8wr" event={"ID":"3dc5a393-ee87-4a1c-b786-9523a05de343","Type":"ContainerStarted","Data":"1122fca2c6c45e49ae9522abb082ce9592618f8c64409aec2818fd53d61ab4ef"} Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.697468 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hc62d"] Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.697820 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-9d774" event={"ID":"4a518559-4b60-4e05-b0e1-7b2ef4b30817","Type":"ContainerStarted","Data":"adc48c0246ac959d6fae88ae1f64e93e85e158952d53784bd143fc4ef629dc6d"} Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.697864 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-9d774" event={"ID":"4a518559-4b60-4e05-b0e1-7b2ef4b30817","Type":"ContainerStarted","Data":"ed9926df0b3070daaeb0bc63c474fe7522f4da04368c2c774c5209b879009baa"} Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.698550 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-9d774" Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.752171 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8lvcb\" (UniqueName: \"kubernetes.io/projected/5d0f6c12-cd85-4473-812f-d8fcffb2742e-kube-api-access-8lvcb\") pod \"redhat-operators-hc62d\" (UID: \"5d0f6c12-cd85-4473-812f-d8fcffb2742e\") " pod="openshift-marketplace/redhat-operators-hc62d" Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.761197 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7fs2f" event={"ID":"a59af599-0d79-4301-8c37-e0e7189477ad","Type":"ContainerStarted","Data":"fb3b4072d727bebff9736397440879472574b3d1565c6bb50701623d40c48bb4"} Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.776385 4701 patch_prober.go:28] interesting pod/downloads-7954f5f757-hbp5b container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.33:8080/\": dial tcp 10.217.0.33:8080: connect: connection refused" start-of-body= Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.779082 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-hbp5b" podUID="5d7aff3f-cf94-411b-b23b-c91f58cdc2f6" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.33:8080/\": dial tcp 10.217.0.33:8080: connect: connection refused" Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.779127 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5d0f6c12-cd85-4473-812f-d8fcffb2742e-utilities\") pod \"redhat-operators-hc62d\" (UID: \"5d0f6c12-cd85-4473-812f-d8fcffb2742e\") " pod="openshift-marketplace/redhat-operators-hc62d" Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.779271 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5d0f6c12-cd85-4473-812f-d8fcffb2742e-catalog-content\") pod \"redhat-operators-hc62d\" (UID: 
\"5d0f6c12-cd85-4473-812f-d8fcffb2742e\") " pod="openshift-marketplace/redhat-operators-hc62d" Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.779343 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:27 crc kubenswrapper[4701]: E1121 19:03:27.779848 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:28.279826247 +0000 UTC m=+99.064966274 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.812278 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-g7gbn" podStartSLOduration=72.812226784 podStartE2EDuration="1m12.812226784s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:27.804967006 +0000 UTC m=+98.590107033" watchObservedRunningTime="2025-11-21 19:03:27.812226784 +0000 UTC m=+98.597366811" Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.819146 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-75d7z" Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.819240 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.819280 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mqbkq" Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.886153 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-hsngh" Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.888231 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-hsngh" Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.890339 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-hsngh" Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.895071 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 
19:03:27 crc kubenswrapper[4701]: E1121 19:03:27.895288 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:28.395134474 +0000 UTC m=+99.180274501 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.904715 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8lvcb\" (UniqueName: \"kubernetes.io/projected/5d0f6c12-cd85-4473-812f-d8fcffb2742e-kube-api-access-8lvcb\") pod \"redhat-operators-hc62d\" (UID: \"5d0f6c12-cd85-4473-812f-d8fcffb2742e\") " pod="openshift-marketplace/redhat-operators-hc62d" Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.905109 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5d0f6c12-cd85-4473-812f-d8fcffb2742e-utilities\") pod \"redhat-operators-hc62d\" (UID: \"5d0f6c12-cd85-4473-812f-d8fcffb2742e\") " pod="openshift-marketplace/redhat-operators-hc62d" Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.905337 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5d0f6c12-cd85-4473-812f-d8fcffb2742e-catalog-content\") pod \"redhat-operators-hc62d\" (UID: \"5d0f6c12-cd85-4473-812f-d8fcffb2742e\") " pod="openshift-marketplace/redhat-operators-hc62d" Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.905516 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.909890 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5d0f6c12-cd85-4473-812f-d8fcffb2742e-utilities\") pod \"redhat-operators-hc62d\" (UID: \"5d0f6c12-cd85-4473-812f-d8fcffb2742e\") " pod="openshift-marketplace/redhat-operators-hc62d" Nov 21 19:03:27 crc kubenswrapper[4701]: E1121 19:03:27.911120 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:28.411101143 +0000 UTC m=+99.196241170 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.926197 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5d0f6c12-cd85-4473-812f-d8fcffb2742e-catalog-content\") pod \"redhat-operators-hc62d\" (UID: \"5d0f6c12-cd85-4473-812f-d8fcffb2742e\") " pod="openshift-marketplace/redhat-operators-hc62d" Nov 21 19:03:27 crc kubenswrapper[4701]: I1121 19:03:27.928080 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-9d774" podStartSLOduration=9.928056863 podStartE2EDuration="9.928056863s" podCreationTimestamp="2025-11-21 19:03:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:27.865110488 +0000 UTC m=+98.650250515" watchObservedRunningTime="2025-11-21 19:03:27.928056863 +0000 UTC m=+98.713196890" Nov 21 19:03:28 crc kubenswrapper[4701]: I1121 19:03:28.002186 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8lvcb\" (UniqueName: \"kubernetes.io/projected/5d0f6c12-cd85-4473-812f-d8fcffb2742e-kube-api-access-8lvcb\") pod \"redhat-operators-hc62d\" (UID: \"5d0f6c12-cd85-4473-812f-d8fcffb2742e\") " pod="openshift-marketplace/redhat-operators-hc62d" Nov 21 19:03:28 crc kubenswrapper[4701]: I1121 19:03:28.006598 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:28 crc kubenswrapper[4701]: E1121 19:03:28.008834 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:28.508812276 +0000 UTC m=+99.293952303 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:28 crc kubenswrapper[4701]: I1121 19:03:28.061068 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-hc62d" Nov 21 19:03:28 crc kubenswrapper[4701]: I1121 19:03:28.108329 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-h6bck"] Nov 21 19:03:28 crc kubenswrapper[4701]: I1121 19:03:28.108920 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:28 crc kubenswrapper[4701]: E1121 19:03:28.109385 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:28.609366901 +0000 UTC m=+99.394506928 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:28 crc kubenswrapper[4701]: I1121 19:03:28.209921 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:28 crc kubenswrapper[4701]: E1121 19:03:28.210561 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:28.71053734 +0000 UTC m=+99.495677367 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:28 crc kubenswrapper[4701]: I1121 19:03:28.314354 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:28 crc kubenswrapper[4701]: E1121 19:03:28.314826 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-21 19:03:28.814810627 +0000 UTC m=+99.599950654 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:28 crc kubenswrapper[4701]: I1121 19:03:28.350665 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-w6nbm"] Nov 21 19:03:28 crc kubenswrapper[4701]: I1121 19:03:28.416244 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:28 crc kubenswrapper[4701]: E1121 19:03:28.416799 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:28.916784103 +0000 UTC m=+99.701924130 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:28 crc kubenswrapper[4701]: I1121 19:03:28.518443 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:28 crc kubenswrapper[4701]: E1121 19:03:28.518904 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:29.018892963 +0000 UTC m=+99.804032980 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:28 crc kubenswrapper[4701]: I1121 19:03:28.613368 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7wcwn" Nov 21 19:03:28 crc kubenswrapper[4701]: I1121 19:03:28.620065 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:28 crc kubenswrapper[4701]: E1121 19:03:28.620731 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:29.120705555 +0000 UTC m=+99.905845582 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:28 crc kubenswrapper[4701]: I1121 19:03:28.723764 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:28 crc kubenswrapper[4701]: E1121 19:03:28.724707 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:29.224687015 +0000 UTC m=+100.009827042 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:28 crc kubenswrapper[4701]: I1121 19:03:28.821739 4701 generic.go:334] "Generic (PLEG): container finished" podID="276e2cb3-e02e-4122-b10b-a454198b7954" containerID="43c55175c367845afbb71d4d61ac30931c3eaa6f41b08f77af904180bcecf56f" exitCode=0 Nov 21 19:03:28 crc kubenswrapper[4701]: I1121 19:03:28.821849 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395860-72qpb" event={"ID":"276e2cb3-e02e-4122-b10b-a454198b7954","Type":"ContainerDied","Data":"43c55175c367845afbb71d4d61ac30931c3eaa6f41b08f77af904180bcecf56f"} Nov 21 19:03:28 crc kubenswrapper[4701]: I1121 19:03:28.828243 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:28 crc kubenswrapper[4701]: E1121 19:03:28.828707 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:29.328661975 +0000 UTC m=+100.113802012 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:28 crc kubenswrapper[4701]: I1121 19:03:28.836532 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w6nbm" event={"ID":"90bb3a29-464a-4967-a009-d4fa6b92de73","Type":"ContainerStarted","Data":"3590a0b1faa20a988f3a5d2e50d370fad2b71b4035babb5c8fe12f0353d6b85e"} Nov 21 19:03:28 crc kubenswrapper[4701]: I1121 19:03:28.836591 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w6nbm" event={"ID":"90bb3a29-464a-4967-a009-d4fa6b92de73","Type":"ContainerStarted","Data":"403d68382d830c6809c21c6c8a81e267a183c192411b971ae0df4e8124bd512c"} Nov 21 19:03:28 crc kubenswrapper[4701]: I1121 19:03:28.868990 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h6bck" event={"ID":"53624f72-bfb0-4be5-b00a-f06de73ae1f0","Type":"ContainerStarted","Data":"83d7a42597c2e260bf3ad823747c3449bffc7086aa83c3c9d84f684f8ae31228"} Nov 21 19:03:28 crc kubenswrapper[4701]: I1121 19:03:28.869107 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h6bck" event={"ID":"53624f72-bfb0-4be5-b00a-f06de73ae1f0","Type":"ContainerStarted","Data":"7af4cbae98316d2642a05aec1245ee44d12c583a6c22564712462da77d1ba019"} Nov 21 19:03:28 crc kubenswrapper[4701]: I1121 19:03:28.880898 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hc62d"] Nov 21 19:03:28 crc kubenswrapper[4701]: I1121 19:03:28.891975 4701 generic.go:334] "Generic (PLEG): container finished" podID="795df5e4-162f-49a8-8316-2307c03a3f2d" containerID="0c65112be33de7d23a36799d00b0101f6ad3c5abd1be84347da3b07fcf81d4e8" exitCode=0 Nov 21 19:03:28 crc kubenswrapper[4701]: I1121 19:03:28.893452 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rcsbh" event={"ID":"795df5e4-162f-49a8-8316-2307c03a3f2d","Type":"ContainerDied","Data":"0c65112be33de7d23a36799d00b0101f6ad3c5abd1be84347da3b07fcf81d4e8"} Nov 21 19:03:28 crc kubenswrapper[4701]: I1121 19:03:28.893577 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rcsbh" event={"ID":"795df5e4-162f-49a8-8316-2307c03a3f2d","Type":"ContainerStarted","Data":"a3cacd87e6814702b8dac247ccc17ddd1dfa12c5f4ac23f384e91c3dfcc886c2"} Nov 21 19:03:28 crc kubenswrapper[4701]: I1121 19:03:28.955307 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:28 crc kubenswrapper[4701]: E1121 19:03:28.964851 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-21 19:03:29.464823118 +0000 UTC m=+100.249963135 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.057765 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:29 crc kubenswrapper[4701]: E1121 19:03:29.059676 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:29.559651578 +0000 UTC m=+100.344791605 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.161540 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:29 crc kubenswrapper[4701]: E1121 19:03:29.162145 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:29.662108635 +0000 UTC m=+100.447248662 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.262102 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:29 crc kubenswrapper[4701]: E1121 19:03:29.262406 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:29.762389494 +0000 UTC m=+100.547529521 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.363689 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:29 crc kubenswrapper[4701]: E1121 19:03:29.364193 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:29.864149836 +0000 UTC m=+100.649289863 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.382140 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.384671 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.389295 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.389461 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.390957 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.466395 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:29 crc kubenswrapper[4701]: E1121 19:03:29.466583 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:29.966552372 +0000 UTC m=+100.751692399 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.466765 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:29 crc kubenswrapper[4701]: E1121 19:03:29.467288 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:29.967269987 +0000 UTC m=+100.752410014 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.519743 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.520752 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.523266 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.523543 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.536816 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.560570 4701 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.568721 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:29 crc kubenswrapper[4701]: E1121 19:03:29.568893 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:30.068866665 +0000 UTC m=+100.854006692 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.569034 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.569122 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/684dae85-0eb2-464b-8913-231a41728798-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"684dae85-0eb2-464b-8913-231a41728798\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.569167 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/684dae85-0eb2-464b-8913-231a41728798-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"684dae85-0eb2-464b-8913-231a41728798\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 21 19:03:29 crc kubenswrapper[4701]: E1121 19:03:29.570994 4701 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:30.070978321 +0000 UTC m=+100.856118348 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.672064 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.672571 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/acc49089-393a-4ff8-9d87-f181dc8d45f1-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"acc49089-393a-4ff8-9d87-f181dc8d45f1\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 21 19:03:29 crc kubenswrapper[4701]: E1121 19:03:29.672700 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:30.172661911 +0000 UTC m=+100.957801938 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.673046 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.673140 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/acc49089-393a-4ff8-9d87-f181dc8d45f1-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"acc49089-393a-4ff8-9d87-f181dc8d45f1\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.673236 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/684dae85-0eb2-464b-8913-231a41728798-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"684dae85-0eb2-464b-8913-231a41728798\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.673292 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/684dae85-0eb2-464b-8913-231a41728798-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"684dae85-0eb2-464b-8913-231a41728798\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.673712 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/684dae85-0eb2-464b-8913-231a41728798-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"684dae85-0eb2-464b-8913-231a41728798\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 21 19:03:29 crc kubenswrapper[4701]: E1121 19:03:29.673848 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:30.173813817 +0000 UTC m=+100.958953844 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.691960 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/684dae85-0eb2-464b-8913-231a41728798-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"684dae85-0eb2-464b-8913-231a41728798\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.740753 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.775604 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.775789 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/acc49089-393a-4ff8-9d87-f181dc8d45f1-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"acc49089-393a-4ff8-9d87-f181dc8d45f1\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.775856 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/acc49089-393a-4ff8-9d87-f181dc8d45f1-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"acc49089-393a-4ff8-9d87-f181dc8d45f1\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.775920 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/acc49089-393a-4ff8-9d87-f181dc8d45f1-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"acc49089-393a-4ff8-9d87-f181dc8d45f1\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 21 19:03:29 crc kubenswrapper[4701]: E1121 19:03:29.775988 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 19:03:30.275973497 +0000 UTC m=+101.061113524 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.795441 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/acc49089-393a-4ff8-9d87-f181dc8d45f1-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"acc49089-393a-4ff8-9d87-f181dc8d45f1\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.838900 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.876629 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:29 crc kubenswrapper[4701]: E1121 19:03:29.877134 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:30.377108595 +0000 UTC m=+101.162248612 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.906942 4701 generic.go:334] "Generic (PLEG): container finished" podID="53624f72-bfb0-4be5-b00a-f06de73ae1f0" containerID="83d7a42597c2e260bf3ad823747c3449bffc7086aa83c3c9d84f684f8ae31228" exitCode=0 Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.907037 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h6bck" event={"ID":"53624f72-bfb0-4be5-b00a-f06de73ae1f0","Type":"ContainerDied","Data":"83d7a42597c2e260bf3ad823747c3449bffc7086aa83c3c9d84f684f8ae31228"} Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.912310 4701 generic.go:334] "Generic (PLEG): container finished" podID="5d0f6c12-cd85-4473-812f-d8fcffb2742e" containerID="889193609ef8f3e0d4011ac8ba850b69c84580200c47ace25c329f3612453bfc" exitCode=0 Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.913093 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hc62d" event={"ID":"5d0f6c12-cd85-4473-812f-d8fcffb2742e","Type":"ContainerDied","Data":"889193609ef8f3e0d4011ac8ba850b69c84580200c47ace25c329f3612453bfc"} Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.913124 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hc62d" event={"ID":"5d0f6c12-cd85-4473-812f-d8fcffb2742e","Type":"ContainerStarted","Data":"3bf8bb4d8699d7285972e92a980070ff69cfa333484eb3a55117a4638cda1341"} Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.923316 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-f2jgv" event={"ID":"b6dceb79-b806-4504-bc30-70497679c75d","Type":"ContainerStarted","Data":"e5cea19ffb76dd5314c926930d4cf364cfed0d1fc98687cd600932e85f329e31"} Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.925407 4701 generic.go:334] "Generic (PLEG): container finished" podID="90bb3a29-464a-4967-a009-d4fa6b92de73" containerID="3590a0b1faa20a988f3a5d2e50d370fad2b71b4035babb5c8fe12f0353d6b85e" exitCode=0 Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.926326 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w6nbm" event={"ID":"90bb3a29-464a-4967-a009-d4fa6b92de73","Type":"ContainerDied","Data":"3590a0b1faa20a988f3a5d2e50d370fad2b71b4035babb5c8fe12f0353d6b85e"} Nov 21 19:03:29 crc kubenswrapper[4701]: I1121 19:03:29.978096 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:29 crc kubenswrapper[4701]: E1121 19:03:29.978643 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-21 19:03:30.478585951 +0000 UTC m=+101.263725978 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:30 crc kubenswrapper[4701]: I1121 19:03:30.056581 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 21 19:03:30 crc kubenswrapper[4701]: I1121 19:03:30.085540 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:30 crc kubenswrapper[4701]: E1121 19:03:30.095867 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 19:03:30.595421691 +0000 UTC m=+101.380561718 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wzsrk" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:30 crc kubenswrapper[4701]: I1121 19:03:30.125714 4701 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-21T19:03:29.560623535Z","Handler":null,"Name":""} Nov 21 19:03:30 crc kubenswrapper[4701]: W1121 19:03:30.126361 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod684dae85_0eb2_464b_8913_231a41728798.slice/crio-a6aed97c7e24622ffbeb8117082f6d0a7eba51cd466143e38cda1a2178708b92 WatchSource:0}: Error finding container a6aed97c7e24622ffbeb8117082f6d0a7eba51cd466143e38cda1a2178708b92: Status 404 returned error can't find the container with id a6aed97c7e24622ffbeb8117082f6d0a7eba51cd466143e38cda1a2178708b92 Nov 21 19:03:30 crc kubenswrapper[4701]: I1121 19:03:30.186898 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:30 crc kubenswrapper[4701]: E1121 19:03:30.187546 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-21 19:03:30.687529972 +0000 UTC m=+101.472669999 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 19:03:30 crc kubenswrapper[4701]: I1121 19:03:30.228548 4701 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Nov 21 19:03:30 crc kubenswrapper[4701]: I1121 19:03:30.228594 4701 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Nov 21 19:03:30 crc kubenswrapper[4701]: I1121 19:03:30.280252 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395860-72qpb" Nov 21 19:03:30 crc kubenswrapper[4701]: I1121 19:03:30.289655 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pzz8d\" (UniqueName: \"kubernetes.io/projected/276e2cb3-e02e-4122-b10b-a454198b7954-kube-api-access-pzz8d\") pod \"276e2cb3-e02e-4122-b10b-a454198b7954\" (UID: \"276e2cb3-e02e-4122-b10b-a454198b7954\") " Nov 21 19:03:30 crc kubenswrapper[4701]: I1121 19:03:30.289817 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/276e2cb3-e02e-4122-b10b-a454198b7954-secret-volume\") pod \"276e2cb3-e02e-4122-b10b-a454198b7954\" (UID: \"276e2cb3-e02e-4122-b10b-a454198b7954\") " Nov 21 19:03:30 crc kubenswrapper[4701]: I1121 19:03:30.289851 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/276e2cb3-e02e-4122-b10b-a454198b7954-config-volume\") pod \"276e2cb3-e02e-4122-b10b-a454198b7954\" (UID: \"276e2cb3-e02e-4122-b10b-a454198b7954\") " Nov 21 19:03:30 crc kubenswrapper[4701]: I1121 19:03:30.290994 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:30 crc kubenswrapper[4701]: I1121 19:03:30.292255 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/276e2cb3-e02e-4122-b10b-a454198b7954-config-volume" (OuterVolumeSpecName: "config-volume") pod "276e2cb3-e02e-4122-b10b-a454198b7954" (UID: "276e2cb3-e02e-4122-b10b-a454198b7954"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:03:30 crc kubenswrapper[4701]: I1121 19:03:30.297347 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 21 19:03:30 crc kubenswrapper[4701]: I1121 19:03:30.298856 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/276e2cb3-e02e-4122-b10b-a454198b7954-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "276e2cb3-e02e-4122-b10b-a454198b7954" (UID: "276e2cb3-e02e-4122-b10b-a454198b7954"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:03:30 crc kubenswrapper[4701]: I1121 19:03:30.300646 4701 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 21 19:03:30 crc kubenswrapper[4701]: I1121 19:03:30.300684 4701 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:30 crc kubenswrapper[4701]: I1121 19:03:30.302173 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/276e2cb3-e02e-4122-b10b-a454198b7954-kube-api-access-pzz8d" (OuterVolumeSpecName: "kube-api-access-pzz8d") pod "276e2cb3-e02e-4122-b10b-a454198b7954" (UID: "276e2cb3-e02e-4122-b10b-a454198b7954"). InnerVolumeSpecName "kube-api-access-pzz8d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:03:30 crc kubenswrapper[4701]: W1121 19:03:30.348722 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-podacc49089_393a_4ff8_9d87_f181dc8d45f1.slice/crio-77964d2eefa6a27bc18a2ab897a148650e39735b126f993712d53dea49bca21d WatchSource:0}: Error finding container 77964d2eefa6a27bc18a2ab897a148650e39735b126f993712d53dea49bca21d: Status 404 returned error can't find the container with id 77964d2eefa6a27bc18a2ab897a148650e39735b126f993712d53dea49bca21d Nov 21 19:03:30 crc kubenswrapper[4701]: I1121 19:03:30.353099 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wzsrk\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:30 crc kubenswrapper[4701]: I1121 19:03:30.384236 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:30 crc kubenswrapper[4701]: I1121 19:03:30.393442 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 19:03:30 crc kubenswrapper[4701]: I1121 19:03:30.393972 4701 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/276e2cb3-e02e-4122-b10b-a454198b7954-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 21 19:03:30 crc kubenswrapper[4701]: I1121 19:03:30.393991 4701 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/276e2cb3-e02e-4122-b10b-a454198b7954-config-volume\") on node \"crc\" DevicePath \"\"" Nov 21 19:03:30 crc kubenswrapper[4701]: I1121 19:03:30.394000 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pzz8d\" (UniqueName: \"kubernetes.io/projected/276e2cb3-e02e-4122-b10b-a454198b7954-kube-api-access-pzz8d\") on node \"crc\" DevicePath \"\"" Nov 21 19:03:30 crc kubenswrapper[4701]: I1121 19:03:30.432918 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 21 19:03:30 crc kubenswrapper[4701]: I1121 19:03:30.954962 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"684dae85-0eb2-464b-8913-231a41728798","Type":"ContainerStarted","Data":"a6aed97c7e24622ffbeb8117082f6d0a7eba51cd466143e38cda1a2178708b92"} Nov 21 19:03:30 crc kubenswrapper[4701]: I1121 19:03:30.962622 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-f2jgv" event={"ID":"b6dceb79-b806-4504-bc30-70497679c75d","Type":"ContainerStarted","Data":"67c65d2d625ff987bdb8068ae6fa419da8137a8d7b683c1b14268b568090c6ee"} Nov 21 19:03:30 crc kubenswrapper[4701]: I1121 19:03:30.967953 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395860-72qpb" Nov 21 19:03:30 crc kubenswrapper[4701]: I1121 19:03:30.969091 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395860-72qpb" event={"ID":"276e2cb3-e02e-4122-b10b-a454198b7954","Type":"ContainerDied","Data":"a34c88325ebff25c37ad7a03259d4ba53ce1021b21efcabe8fba6525e24164d4"} Nov 21 19:03:30 crc kubenswrapper[4701]: I1121 19:03:30.969161 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a34c88325ebff25c37ad7a03259d4ba53ce1021b21efcabe8fba6525e24164d4" Nov 21 19:03:30 crc kubenswrapper[4701]: I1121 19:03:30.971507 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"acc49089-393a-4ff8-9d87-f181dc8d45f1","Type":"ContainerStarted","Data":"77964d2eefa6a27bc18a2ab897a148650e39735b126f993712d53dea49bca21d"} Nov 21 19:03:31 crc kubenswrapper[4701]: I1121 19:03:31.130938 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-wzsrk"] Nov 21 19:03:31 crc kubenswrapper[4701]: I1121 19:03:31.290717 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-v6w6b" Nov 21 19:03:31 crc kubenswrapper[4701]: I1121 19:03:31.330482 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-cwddx" Nov 21 19:03:31 crc kubenswrapper[4701]: I1121 19:03:31.331288 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-cwddx" Nov 21 19:03:31 crc kubenswrapper[4701]: I1121 19:03:31.341345 4701 patch_prober.go:28] interesting pod/console-f9d7485db-cwddx container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.17:8443/health\": dial tcp 10.217.0.17:8443: connect: connection refused" start-of-body= Nov 21 19:03:31 crc kubenswrapper[4701]: I1121 19:03:31.341402 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-cwddx" podUID="f7def574-9941-4933-83df-3f20df5797d4" containerName="console" probeResult="failure" output="Get \"https://10.217.0.17:8443/health\": dial tcp 10.217.0.17:8443: connect: connection refused" Nov 21 19:03:31 crc kubenswrapper[4701]: I1121 19:03:31.374567 4701 patch_prober.go:28] interesting pod/downloads-7954f5f757-hbp5b container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.33:8080/\": dial tcp 10.217.0.33:8080: connect: connection refused" start-of-body= Nov 21 19:03:31 crc kubenswrapper[4701]: I1121 19:03:31.374678 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-hbp5b" podUID="5d7aff3f-cf94-411b-b23b-c91f58cdc2f6" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.33:8080/\": dial tcp 10.217.0.33:8080: connect: connection refused" Nov 21 19:03:31 crc kubenswrapper[4701]: I1121 19:03:31.374748 4701 patch_prober.go:28] interesting pod/downloads-7954f5f757-hbp5b container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.33:8080/\": dial tcp 10.217.0.33:8080: connect: connection refused" start-of-body= Nov 21 19:03:31 crc kubenswrapper[4701]: I1121 19:03:31.374790 4701 prober.go:107] "Probe failed" 
probeType="Readiness" pod="openshift-console/downloads-7954f5f757-hbp5b" podUID="5d7aff3f-cf94-411b-b23b-c91f58cdc2f6" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.33:8080/\": dial tcp 10.217.0.33:8080: connect: connection refused" Nov 21 19:03:31 crc kubenswrapper[4701]: I1121 19:03:31.664395 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-g7gbn" Nov 21 19:03:31 crc kubenswrapper[4701]: I1121 19:03:31.664938 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-g7gbn" Nov 21 19:03:31 crc kubenswrapper[4701]: I1121 19:03:31.678719 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-g7gbn" Nov 21 19:03:31 crc kubenswrapper[4701]: I1121 19:03:31.969590 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Nov 21 19:03:32 crc kubenswrapper[4701]: I1121 19:03:32.053777 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" event={"ID":"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1","Type":"ContainerStarted","Data":"7281301eeb0935fddb9b6bde6ce27d101499ebcdf9cb75abe9635f22a3174dbc"} Nov 21 19:03:32 crc kubenswrapper[4701]: I1121 19:03:32.053828 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" event={"ID":"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1","Type":"ContainerStarted","Data":"b972d7d6b0636f43a9e54dd7abb163e4e545adb553b4ed10af6f216c3ec8b610"} Nov 21 19:03:32 crc kubenswrapper[4701]: I1121 19:03:32.054727 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:32 crc kubenswrapper[4701]: I1121 19:03:32.064229 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"acc49089-393a-4ff8-9d87-f181dc8d45f1","Type":"ContainerStarted","Data":"e70592249ef407a12adb5126a3f09e77b20cb262ef09c18bbe2de32eb258b02a"} Nov 21 19:03:32 crc kubenswrapper[4701]: I1121 19:03:32.078015 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" podStartSLOduration=77.078004225 podStartE2EDuration="1m17.078004225s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:32.074150912 +0000 UTC m=+102.859290939" watchObservedRunningTime="2025-11-21 19:03:32.078004225 +0000 UTC m=+102.863144252" Nov 21 19:03:32 crc kubenswrapper[4701]: I1121 19:03:32.088028 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"684dae85-0eb2-464b-8913-231a41728798","Type":"ContainerStarted","Data":"73b2497303d8b63d26580e69edef4d6d99341a482f2a792c2f2ff436ca6cc0f4"} Nov 21 19:03:32 crc kubenswrapper[4701]: I1121 19:03:32.095690 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-f2jgv" event={"ID":"b6dceb79-b806-4504-bc30-70497679c75d","Type":"ContainerStarted","Data":"afa15e5e74726a559606a6d2b5e40419332b4e05336a3d8572d1d3aea590a3e0"} Nov 21 19:03:32 crc 
kubenswrapper[4701]: I1121 19:03:32.107289 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-g7gbn" Nov 21 19:03:32 crc kubenswrapper[4701]: I1121 19:03:32.152931 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-f2jgv" podStartSLOduration=14.152915861 podStartE2EDuration="14.152915861s" podCreationTimestamp="2025-11-21 19:03:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:32.151076061 +0000 UTC m=+102.936216088" watchObservedRunningTime="2025-11-21 19:03:32.152915861 +0000 UTC m=+102.938055888" Nov 21 19:03:33 crc kubenswrapper[4701]: I1121 19:03:33.140536 4701 generic.go:334] "Generic (PLEG): container finished" podID="acc49089-393a-4ff8-9d87-f181dc8d45f1" containerID="e70592249ef407a12adb5126a3f09e77b20cb262ef09c18bbe2de32eb258b02a" exitCode=0 Nov 21 19:03:33 crc kubenswrapper[4701]: I1121 19:03:33.140624 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"acc49089-393a-4ff8-9d87-f181dc8d45f1","Type":"ContainerDied","Data":"e70592249ef407a12adb5126a3f09e77b20cb262ef09c18bbe2de32eb258b02a"} Nov 21 19:03:33 crc kubenswrapper[4701]: I1121 19:03:33.155369 4701 generic.go:334] "Generic (PLEG): container finished" podID="684dae85-0eb2-464b-8913-231a41728798" containerID="73b2497303d8b63d26580e69edef4d6d99341a482f2a792c2f2ff436ca6cc0f4" exitCode=0 Nov 21 19:03:33 crc kubenswrapper[4701]: I1121 19:03:33.156315 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"684dae85-0eb2-464b-8913-231a41728798","Type":"ContainerDied","Data":"73b2497303d8b63d26580e69edef4d6d99341a482f2a792c2f2ff436ca6cc0f4"} Nov 21 19:03:33 crc kubenswrapper[4701]: I1121 19:03:33.726034 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 21 19:03:33 crc kubenswrapper[4701]: I1121 19:03:33.740787 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 21 19:03:33 crc kubenswrapper[4701]: I1121 19:03:33.876034 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/acc49089-393a-4ff8-9d87-f181dc8d45f1-kube-api-access\") pod \"acc49089-393a-4ff8-9d87-f181dc8d45f1\" (UID: \"acc49089-393a-4ff8-9d87-f181dc8d45f1\") " Nov 21 19:03:33 crc kubenswrapper[4701]: I1121 19:03:33.876135 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/acc49089-393a-4ff8-9d87-f181dc8d45f1-kubelet-dir\") pod \"acc49089-393a-4ff8-9d87-f181dc8d45f1\" (UID: \"acc49089-393a-4ff8-9d87-f181dc8d45f1\") " Nov 21 19:03:33 crc kubenswrapper[4701]: I1121 19:03:33.876182 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/684dae85-0eb2-464b-8913-231a41728798-kube-api-access\") pod \"684dae85-0eb2-464b-8913-231a41728798\" (UID: \"684dae85-0eb2-464b-8913-231a41728798\") " Nov 21 19:03:33 crc kubenswrapper[4701]: I1121 19:03:33.876242 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/684dae85-0eb2-464b-8913-231a41728798-kubelet-dir\") pod \"684dae85-0eb2-464b-8913-231a41728798\" (UID: \"684dae85-0eb2-464b-8913-231a41728798\") " Nov 21 19:03:33 crc kubenswrapper[4701]: I1121 19:03:33.876546 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/684dae85-0eb2-464b-8913-231a41728798-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "684dae85-0eb2-464b-8913-231a41728798" (UID: "684dae85-0eb2-464b-8913-231a41728798"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 19:03:33 crc kubenswrapper[4701]: I1121 19:03:33.876581 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/acc49089-393a-4ff8-9d87-f181dc8d45f1-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "acc49089-393a-4ff8-9d87-f181dc8d45f1" (UID: "acc49089-393a-4ff8-9d87-f181dc8d45f1"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 19:03:33 crc kubenswrapper[4701]: I1121 19:03:33.891491 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/acc49089-393a-4ff8-9d87-f181dc8d45f1-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "acc49089-393a-4ff8-9d87-f181dc8d45f1" (UID: "acc49089-393a-4ff8-9d87-f181dc8d45f1"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:03:33 crc kubenswrapper[4701]: I1121 19:03:33.899084 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/684dae85-0eb2-464b-8913-231a41728798-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "684dae85-0eb2-464b-8913-231a41728798" (UID: "684dae85-0eb2-464b-8913-231a41728798"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:03:33 crc kubenswrapper[4701]: I1121 19:03:33.978024 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/acc49089-393a-4ff8-9d87-f181dc8d45f1-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 21 19:03:33 crc kubenswrapper[4701]: I1121 19:03:33.978056 4701 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/acc49089-393a-4ff8-9d87-f181dc8d45f1-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 21 19:03:33 crc kubenswrapper[4701]: I1121 19:03:33.978065 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/684dae85-0eb2-464b-8913-231a41728798-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 21 19:03:33 crc kubenswrapper[4701]: I1121 19:03:33.978076 4701 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/684dae85-0eb2-464b-8913-231a41728798-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 21 19:03:34 crc kubenswrapper[4701]: I1121 19:03:34.080142 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/73831ccf-a071-4135-b8bf-ee1b9b3c2cd1-metrics-certs\") pod \"network-metrics-daemon-q5n7s\" (UID: \"73831ccf-a071-4135-b8bf-ee1b9b3c2cd1\") " pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:03:34 crc kubenswrapper[4701]: I1121 19:03:34.085899 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/73831ccf-a071-4135-b8bf-ee1b9b3c2cd1-metrics-certs\") pod \"network-metrics-daemon-q5n7s\" (UID: \"73831ccf-a071-4135-b8bf-ee1b9b3c2cd1\") " pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:03:34 crc kubenswrapper[4701]: I1121 19:03:34.181429 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 21 19:03:34 crc kubenswrapper[4701]: I1121 19:03:34.181381 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"acc49089-393a-4ff8-9d87-f181dc8d45f1","Type":"ContainerDied","Data":"77964d2eefa6a27bc18a2ab897a148650e39735b126f993712d53dea49bca21d"} Nov 21 19:03:34 crc kubenswrapper[4701]: I1121 19:03:34.181851 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="77964d2eefa6a27bc18a2ab897a148650e39735b126f993712d53dea49bca21d" Nov 21 19:03:34 crc kubenswrapper[4701]: I1121 19:03:34.188764 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 21 19:03:34 crc kubenswrapper[4701]: I1121 19:03:34.188773 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"684dae85-0eb2-464b-8913-231a41728798","Type":"ContainerDied","Data":"a6aed97c7e24622ffbeb8117082f6d0a7eba51cd466143e38cda1a2178708b92"} Nov 21 19:03:34 crc kubenswrapper[4701]: I1121 19:03:34.188834 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a6aed97c7e24622ffbeb8117082f6d0a7eba51cd466143e38cda1a2178708b92" Nov 21 19:03:34 crc kubenswrapper[4701]: I1121 19:03:34.211904 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-q5n7s" Nov 21 19:03:34 crc kubenswrapper[4701]: I1121 19:03:34.692421 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-q5n7s"] Nov 21 19:03:35 crc kubenswrapper[4701]: I1121 19:03:35.215443 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-q5n7s" event={"ID":"73831ccf-a071-4135-b8bf-ee1b9b3c2cd1","Type":"ContainerStarted","Data":"7295088fe2e73d183aae1b19346c49004a5df06c1a38b2f7d18c6fccdc329ab6"} Nov 21 19:03:36 crc kubenswrapper[4701]: I1121 19:03:36.236285 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-q5n7s" event={"ID":"73831ccf-a071-4135-b8bf-ee1b9b3c2cd1","Type":"ContainerStarted","Data":"6be3ff3249951b43585d189f45b9735b9e03adc2fbd461389d78c96825bfc293"} Nov 21 19:03:36 crc kubenswrapper[4701]: I1121 19:03:36.875932 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-9d774" Nov 21 19:03:37 crc kubenswrapper[4701]: I1121 19:03:37.263148 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-q5n7s" event={"ID":"73831ccf-a071-4135-b8bf-ee1b9b3c2cd1","Type":"ContainerStarted","Data":"51f55d07d084c08734e36ab8f4cb0611479b0203ef053bad0fa6d9d8d59014f9"} Nov 21 19:03:38 crc kubenswrapper[4701]: I1121 19:03:38.293525 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-q5n7s" podStartSLOduration=83.293507365 podStartE2EDuration="1m23.293507365s" podCreationTimestamp="2025-11-21 19:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:03:38.288002895 +0000 UTC m=+109.073142942" watchObservedRunningTime="2025-11-21 19:03:38.293507365 +0000 UTC m=+109.078647392" Nov 21 19:03:41 crc kubenswrapper[4701]: I1121 19:03:41.378071 4701 patch_prober.go:28] interesting pod/downloads-7954f5f757-hbp5b container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.33:8080/\": dial tcp 10.217.0.33:8080: connect: connection refused" start-of-body= Nov 21 19:03:41 crc kubenswrapper[4701]: I1121 19:03:41.378729 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-hbp5b" podUID="5d7aff3f-cf94-411b-b23b-c91f58cdc2f6" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.33:8080/\": dial tcp 10.217.0.33:8080: connect: connection refused" Nov 21 19:03:41 crc kubenswrapper[4701]: I1121 19:03:41.378142 4701 patch_prober.go:28] interesting pod/downloads-7954f5f757-hbp5b container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.33:8080/\": dial tcp 10.217.0.33:8080: connect: connection refused" start-of-body= Nov 21 19:03:41 crc kubenswrapper[4701]: I1121 19:03:41.379273 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-hbp5b" podUID="5d7aff3f-cf94-411b-b23b-c91f58cdc2f6" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.33:8080/\": dial tcp 10.217.0.33:8080: connect: connection refused" Nov 21 19:03:41 crc kubenswrapper[4701]: I1121 19:03:41.381687 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-cwddx" Nov 
21 19:03:41 crc kubenswrapper[4701]: I1121 19:03:41.393628 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-cwddx" Nov 21 19:03:47 crc kubenswrapper[4701]: I1121 19:03:47.804659 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:03:50 crc kubenswrapper[4701]: I1121 19:03:50.392141 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:03:51 crc kubenswrapper[4701]: I1121 19:03:51.388404 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-hbp5b" Nov 21 19:04:00 crc kubenswrapper[4701]: E1121 19:04:00.599446 4701 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 21 19:04:00 crc kubenswrapper[4701]: E1121 19:04:00.600346 4701 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xsddx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-h6bck_openshift-marketplace(53624f72-bfb0-4be5-b00a-f06de73ae1f0): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 21 19:04:00 crc kubenswrapper[4701]: E1121 19:04:00.601624 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-h6bck" podUID="53624f72-bfb0-4be5-b00a-f06de73ae1f0" Nov 21 19:04:01 crc kubenswrapper[4701]: I1121 19:04:01.275725 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-twqqt" Nov 21 19:04:02 crc kubenswrapper[4701]: E1121 19:04:02.786188 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-h6bck" podUID="53624f72-bfb0-4be5-b00a-f06de73ae1f0" Nov 21 19:04:03 crc kubenswrapper[4701]: E1121 19:04:03.503925 4701 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 21 19:04:03 crc kubenswrapper[4701]: E1121 19:04:03.504532 4701 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ddzdt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-xjpsq_openshift-marketplace(4b7dd16f-9a0f-406f-84b4-cc94baf5405c): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 21 19:04:03 crc kubenswrapper[4701]: E1121 19:04:03.505918 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-xjpsq" podUID="4b7dd16f-9a0f-406f-84b4-cc94baf5405c" Nov 21 19:04:04 crc kubenswrapper[4701]: E1121 19:04:04.127515 4701 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 21 19:04:04 crc kubenswrapper[4701]: E1121 19:04:04.127676 4701 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-76lkf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-nkl92_openshift-marketplace(cb02901b-e5a6-4059-b49c-9011bfb481c9): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 21 19:04:04 crc kubenswrapper[4701]: E1121 19:04:04.129666 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-nkl92" podUID="cb02901b-e5a6-4059-b49c-9011bfb481c9" Nov 21 19:04:07 crc kubenswrapper[4701]: E1121 19:04:07.954623 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-xjpsq" podUID="4b7dd16f-9a0f-406f-84b4-cc94baf5405c" Nov 21 19:04:07 crc kubenswrapper[4701]: E1121 19:04:07.955104 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-nkl92" podUID="cb02901b-e5a6-4059-b49c-9011bfb481c9" Nov 21 19:04:08 crc kubenswrapper[4701]: E1121 19:04:08.192470 4701 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 21 19:04:08 crc kubenswrapper[4701]: E1121 19:04:08.192969 4701 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fh648,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-w6nbm_openshift-marketplace(90bb3a29-464a-4967-a009-d4fa6b92de73): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 21 19:04:08 crc kubenswrapper[4701]: E1121 19:04:08.194328 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-w6nbm" podUID="90bb3a29-464a-4967-a009-d4fa6b92de73" Nov 21 19:04:12 crc kubenswrapper[4701]: E1121 19:04:12.452480 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-w6nbm" podUID="90bb3a29-464a-4967-a009-d4fa6b92de73" Nov 21 19:04:12 crc kubenswrapper[4701]: E1121 19:04:12.499798 4701 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 21 19:04:12 crc kubenswrapper[4701]: E1121 19:04:12.500048 4701 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8lvcb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-hc62d_openshift-marketplace(5d0f6c12-cd85-4473-812f-d8fcffb2742e): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 21 19:04:12 crc kubenswrapper[4701]: E1121 19:04:12.501339 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-hc62d" podUID="5d0f6c12-cd85-4473-812f-d8fcffb2742e" Nov 21 19:04:12 crc kubenswrapper[4701]: E1121 19:04:12.549923 4701 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 21 19:04:12 crc kubenswrapper[4701]: E1121 19:04:12.550156 4701 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-slgtp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-rcsbh_openshift-marketplace(795df5e4-162f-49a8-8316-2307c03a3f2d): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 21 19:04:12 crc kubenswrapper[4701]: E1121 19:04:12.551442 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-rcsbh" podUID="795df5e4-162f-49a8-8316-2307c03a3f2d" Nov 21 19:04:12 crc kubenswrapper[4701]: E1121 19:04:12.574679 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-rcsbh" podUID="795df5e4-162f-49a8-8316-2307c03a3f2d" Nov 21 19:04:12 crc kubenswrapper[4701]: E1121 19:04:12.574913 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-hc62d" podUID="5d0f6c12-cd85-4473-812f-d8fcffb2742e" Nov 21 19:04:14 crc kubenswrapper[4701]: E1121 19:04:14.057693 4701 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 21 19:04:14 crc kubenswrapper[4701]: E1121 19:04:14.057873 4701 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9dlbd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-r9glm_openshift-marketplace(dde062d0-393b-4b35-80ec-c3f67c2a5129): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 21 19:04:14 crc kubenswrapper[4701]: E1121 19:04:14.060441 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-r9glm" podUID="dde062d0-393b-4b35-80ec-c3f67c2a5129" Nov 21 19:04:14 crc kubenswrapper[4701]: E1121 19:04:14.588253 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-r9glm" podUID="dde062d0-393b-4b35-80ec-c3f67c2a5129" Nov 21 19:04:15 crc kubenswrapper[4701]: E1121 19:04:15.580834 4701 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 21 19:04:15 crc kubenswrapper[4701]: E1121 19:04:15.581270 4701 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ppk7z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-7xdws_openshift-marketplace(02015e4f-1f27-4004-8bc6-778e6db5fb94): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 21 19:04:15 crc kubenswrapper[4701]: E1121 19:04:15.582491 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-7xdws" podUID="02015e4f-1f27-4004-8bc6-778e6db5fb94" Nov 21 19:04:15 crc kubenswrapper[4701]: E1121 19:04:15.596598 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-7xdws" podUID="02015e4f-1f27-4004-8bc6-778e6db5fb94" Nov 21 19:04:17 crc kubenswrapper[4701]: I1121 19:04:17.605461 4701 generic.go:334] "Generic (PLEG): container finished" podID="53624f72-bfb0-4be5-b00a-f06de73ae1f0" containerID="d13c7b733fd5da540c1b997440bbce022161a6f3d39b7fd2c81bd03f0c0bba31" exitCode=0 Nov 21 19:04:17 crc kubenswrapper[4701]: I1121 19:04:17.605533 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h6bck" event={"ID":"53624f72-bfb0-4be5-b00a-f06de73ae1f0","Type":"ContainerDied","Data":"d13c7b733fd5da540c1b997440bbce022161a6f3d39b7fd2c81bd03f0c0bba31"} Nov 21 19:04:17 crc kubenswrapper[4701]: I1121 19:04:17.790714 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:04:17 crc kubenswrapper[4701]: I1121 19:04:17.790806 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:04:17 crc kubenswrapper[4701]: I1121 19:04:17.790859 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:04:17 crc kubenswrapper[4701]: I1121 19:04:17.790939 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:04:17 crc kubenswrapper[4701]: I1121 19:04:17.792969 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 21 19:04:17 crc kubenswrapper[4701]: I1121 19:04:17.793662 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 21 19:04:17 crc kubenswrapper[4701]: I1121 19:04:17.793836 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 21 19:04:17 crc kubenswrapper[4701]: I1121 19:04:17.802876 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:04:17 crc kubenswrapper[4701]: I1121 19:04:17.803357 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 21 19:04:17 crc kubenswrapper[4701]: I1121 19:04:17.808236 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:04:17 crc kubenswrapper[4701]: I1121 19:04:17.815311 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:04:17 crc kubenswrapper[4701]: I1121 19:04:17.817310 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:04:17 crc kubenswrapper[4701]: I1121 19:04:17.865968 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:04:17 crc kubenswrapper[4701]: I1121 19:04:17.876143 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 19:04:18 crc kubenswrapper[4701]: I1121 19:04:18.006882 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 19:04:18 crc kubenswrapper[4701]: W1121 19:04:18.271952 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d751cbb_f2e2_430d_9754_c882a5e924a5.slice/crio-c23059532ecf1ff043bcadbae2bc87590df9e28bde17121c6b2eb55c40d83ce9 WatchSource:0}: Error finding container c23059532ecf1ff043bcadbae2bc87590df9e28bde17121c6b2eb55c40d83ce9: Status 404 returned error can't find the container with id c23059532ecf1ff043bcadbae2bc87590df9e28bde17121c6b2eb55c40d83ce9 Nov 21 19:04:18 crc kubenswrapper[4701]: I1121 19:04:18.613819 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 19:04:18 crc kubenswrapper[4701]: I1121 19:04:18.614458 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 19:04:18 crc kubenswrapper[4701]: I1121 19:04:18.624615 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"c0aeed57c960a0b0dbe876371d712da7c4b2425ac1eac444546052d3ae5ab2cc"} Nov 21 19:04:18 crc kubenswrapper[4701]: I1121 19:04:18.624673 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"6e9d93948e10a8a76a7ad093de7be1a52f6997d2402ffa06f11a234c39786f37"} Nov 21 19:04:18 crc kubenswrapper[4701]: I1121 19:04:18.638673 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h6bck" event={"ID":"53624f72-bfb0-4be5-b00a-f06de73ae1f0","Type":"ContainerStarted","Data":"17084e9d101ff0f7819b10979944eb99c8210b852fc8b203197d7bc4a9e5af95"} Nov 21 19:04:18 crc kubenswrapper[4701]: I1121 19:04:18.642117 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"653c52048a2fcc7ca8d43682b4a08f39b91a1e90a27a138b70b426c64f53b81b"} Nov 21 19:04:18 crc kubenswrapper[4701]: I1121 19:04:18.642224 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" 
event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"9e01be81dacab70ab885934159f30528d793a561a7dff4c77bd71826a0561bfd"} Nov 21 19:04:18 crc kubenswrapper[4701]: I1121 19:04:18.642911 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:04:18 crc kubenswrapper[4701]: I1121 19:04:18.645121 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"4d7d6f7734ab043984cf9a79512597fb7c3ef5b39e30de7cf08b3cefc45e0113"} Nov 21 19:04:18 crc kubenswrapper[4701]: I1121 19:04:18.645228 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"c23059532ecf1ff043bcadbae2bc87590df9e28bde17121c6b2eb55c40d83ce9"} Nov 21 19:04:18 crc kubenswrapper[4701]: I1121 19:04:18.704893 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-h6bck" podStartSLOduration=3.508332309 podStartE2EDuration="52.704861979s" podCreationTimestamp="2025-11-21 19:03:26 +0000 UTC" firstStartedPulling="2025-11-21 19:03:28.872078503 +0000 UTC m=+99.657218530" lastFinishedPulling="2025-11-21 19:04:18.068608173 +0000 UTC m=+148.853748200" observedRunningTime="2025-11-21 19:04:18.674604614 +0000 UTC m=+149.459744641" watchObservedRunningTime="2025-11-21 19:04:18.704861979 +0000 UTC m=+149.490002016" Nov 21 19:04:20 crc kubenswrapper[4701]: I1121 19:04:20.665293 4701 generic.go:334] "Generic (PLEG): container finished" podID="4b7dd16f-9a0f-406f-84b4-cc94baf5405c" containerID="bb3ae36789b70877087f91f129172dd05d6ada2891ad1e4d3ea3051741bf2b4f" exitCode=0 Nov 21 19:04:20 crc kubenswrapper[4701]: I1121 19:04:20.665608 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xjpsq" event={"ID":"4b7dd16f-9a0f-406f-84b4-cc94baf5405c","Type":"ContainerDied","Data":"bb3ae36789b70877087f91f129172dd05d6ada2891ad1e4d3ea3051741bf2b4f"} Nov 21 19:04:21 crc kubenswrapper[4701]: I1121 19:04:21.687055 4701 generic.go:334] "Generic (PLEG): container finished" podID="cb02901b-e5a6-4059-b49c-9011bfb481c9" containerID="99a642217bc52a030908b2ffcb8e2e44a53a3e41ab95443ef286e87e87d85cbf" exitCode=0 Nov 21 19:04:21 crc kubenswrapper[4701]: I1121 19:04:21.687157 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nkl92" event={"ID":"cb02901b-e5a6-4059-b49c-9011bfb481c9","Type":"ContainerDied","Data":"99a642217bc52a030908b2ffcb8e2e44a53a3e41ab95443ef286e87e87d85cbf"} Nov 21 19:04:21 crc kubenswrapper[4701]: I1121 19:04:21.693548 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xjpsq" event={"ID":"4b7dd16f-9a0f-406f-84b4-cc94baf5405c","Type":"ContainerStarted","Data":"626ccb54ea122ed7f9ddbb4cd1e6314a69bc5cbcd254f67384cbe086c405dfc5"} Nov 21 19:04:21 crc kubenswrapper[4701]: I1121 19:04:21.754586 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-xjpsq" podStartSLOduration=3.898022337 podStartE2EDuration="57.754550509s" podCreationTimestamp="2025-11-21 19:03:24 +0000 UTC" firstStartedPulling="2025-11-21 19:03:27.500152021 +0000 UTC m=+98.285292048" 
lastFinishedPulling="2025-11-21 19:04:21.356680153 +0000 UTC m=+152.141820220" observedRunningTime="2025-11-21 19:04:21.749909256 +0000 UTC m=+152.535049313" watchObservedRunningTime="2025-11-21 19:04:21.754550509 +0000 UTC m=+152.539690556" Nov 21 19:04:22 crc kubenswrapper[4701]: I1121 19:04:22.704445 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nkl92" event={"ID":"cb02901b-e5a6-4059-b49c-9011bfb481c9","Type":"ContainerStarted","Data":"aa72129c8d9034c18124beafd2bd77e723c7fe5feeee471107cdcaee2ebe58df"} Nov 21 19:04:22 crc kubenswrapper[4701]: I1121 19:04:22.736908 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-nkl92" podStartSLOduration=4.250591407 podStartE2EDuration="58.736877568s" podCreationTimestamp="2025-11-21 19:03:24 +0000 UTC" firstStartedPulling="2025-11-21 19:03:27.664471289 +0000 UTC m=+98.449611316" lastFinishedPulling="2025-11-21 19:04:22.15075745 +0000 UTC m=+152.935897477" observedRunningTime="2025-11-21 19:04:22.731293571 +0000 UTC m=+153.516433628" watchObservedRunningTime="2025-11-21 19:04:22.736877568 +0000 UTC m=+153.522017635" Nov 21 19:04:24 crc kubenswrapper[4701]: I1121 19:04:24.446802 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-nkl92" Nov 21 19:04:24 crc kubenswrapper[4701]: I1121 19:04:24.447323 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-nkl92" Nov 21 19:04:24 crc kubenswrapper[4701]: I1121 19:04:24.665235 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-nkl92" Nov 21 19:04:24 crc kubenswrapper[4701]: I1121 19:04:24.760481 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-xjpsq" Nov 21 19:04:24 crc kubenswrapper[4701]: I1121 19:04:24.761133 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-xjpsq" Nov 21 19:04:24 crc kubenswrapper[4701]: I1121 19:04:24.802880 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-xjpsq" Nov 21 19:04:25 crc kubenswrapper[4701]: I1121 19:04:25.728319 4701 generic.go:334] "Generic (PLEG): container finished" podID="795df5e4-162f-49a8-8316-2307c03a3f2d" containerID="2419aa2171a13f2dc1827d518625a3ee1ba1cf19d1cb42a7d5cd115851b69eeb" exitCode=0 Nov 21 19:04:25 crc kubenswrapper[4701]: I1121 19:04:25.728486 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rcsbh" event={"ID":"795df5e4-162f-49a8-8316-2307c03a3f2d","Type":"ContainerDied","Data":"2419aa2171a13f2dc1827d518625a3ee1ba1cf19d1cb42a7d5cd115851b69eeb"} Nov 21 19:04:26 crc kubenswrapper[4701]: I1121 19:04:26.757514 4701 generic.go:334] "Generic (PLEG): container finished" podID="5d0f6c12-cd85-4473-812f-d8fcffb2742e" containerID="53beb9fd86c3c396974b6bc3cdcab587b4842775ced915ae1c56c9581c663aa1" exitCode=0 Nov 21 19:04:26 crc kubenswrapper[4701]: I1121 19:04:26.757605 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hc62d" event={"ID":"5d0f6c12-cd85-4473-812f-d8fcffb2742e","Type":"ContainerDied","Data":"53beb9fd86c3c396974b6bc3cdcab587b4842775ced915ae1c56c9581c663aa1"} Nov 21 19:04:26 crc kubenswrapper[4701]: I1121 19:04:26.764566 4701 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rcsbh" event={"ID":"795df5e4-162f-49a8-8316-2307c03a3f2d","Type":"ContainerStarted","Data":"a33d080b184cc4311b018b3d0623844263f28bf5e6d75d98001c3160b97e6e9c"} Nov 21 19:04:26 crc kubenswrapper[4701]: I1121 19:04:26.769317 4701 generic.go:334] "Generic (PLEG): container finished" podID="90bb3a29-464a-4967-a009-d4fa6b92de73" containerID="6bda89ab6f5c63e512fe189281514f22034c217c366f9030935a2eca0035e377" exitCode=0 Nov 21 19:04:26 crc kubenswrapper[4701]: I1121 19:04:26.769358 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w6nbm" event={"ID":"90bb3a29-464a-4967-a009-d4fa6b92de73","Type":"ContainerDied","Data":"6bda89ab6f5c63e512fe189281514f22034c217c366f9030935a2eca0035e377"} Nov 21 19:04:26 crc kubenswrapper[4701]: I1121 19:04:26.797644 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-rcsbh" podStartSLOduration=3.545842668 podStartE2EDuration="1m0.797622482s" podCreationTimestamp="2025-11-21 19:03:26 +0000 UTC" firstStartedPulling="2025-11-21 19:03:28.898364907 +0000 UTC m=+99.683504934" lastFinishedPulling="2025-11-21 19:04:26.150144701 +0000 UTC m=+156.935284748" observedRunningTime="2025-11-21 19:04:26.791990844 +0000 UTC m=+157.577130881" watchObservedRunningTime="2025-11-21 19:04:26.797622482 +0000 UTC m=+157.582762519" Nov 21 19:04:27 crc kubenswrapper[4701]: I1121 19:04:27.120901 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-h6bck" Nov 21 19:04:27 crc kubenswrapper[4701]: I1121 19:04:27.121773 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-h6bck" Nov 21 19:04:27 crc kubenswrapper[4701]: I1121 19:04:27.181348 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-h6bck" Nov 21 19:04:27 crc kubenswrapper[4701]: I1121 19:04:27.822481 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-h6bck" Nov 21 19:04:28 crc kubenswrapper[4701]: I1121 19:04:28.653938 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-twmdp"] Nov 21 19:04:28 crc kubenswrapper[4701]: I1121 19:04:28.786963 4701 generic.go:334] "Generic (PLEG): container finished" podID="dde062d0-393b-4b35-80ec-c3f67c2a5129" containerID="3c2dd72634ab5688815daf2bcfacb79ebd17a0ae4a74f64aaa447f36163b6243" exitCode=0 Nov 21 19:04:28 crc kubenswrapper[4701]: I1121 19:04:28.787035 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r9glm" event={"ID":"dde062d0-393b-4b35-80ec-c3f67c2a5129","Type":"ContainerDied","Data":"3c2dd72634ab5688815daf2bcfacb79ebd17a0ae4a74f64aaa447f36163b6243"} Nov 21 19:04:28 crc kubenswrapper[4701]: I1121 19:04:28.790010 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w6nbm" event={"ID":"90bb3a29-464a-4967-a009-d4fa6b92de73","Type":"ContainerStarted","Data":"b3b190cfa646af1c606c1e79ac5419cffce4bd9f594a73e7f58da0746a508a9d"} Nov 21 19:04:28 crc kubenswrapper[4701]: I1121 19:04:28.792640 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hc62d" 
event={"ID":"5d0f6c12-cd85-4473-812f-d8fcffb2742e","Type":"ContainerStarted","Data":"034c57d32f58351b20e0aef516b94a474e4e0c3f49d91e6162e4b2199c2ab2ac"} Nov 21 19:04:28 crc kubenswrapper[4701]: I1121 19:04:28.833888 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-hc62d" podStartSLOduration=4.125428526 podStartE2EDuration="1m1.833870803s" podCreationTimestamp="2025-11-21 19:03:27 +0000 UTC" firstStartedPulling="2025-11-21 19:03:29.924897008 +0000 UTC m=+100.710037035" lastFinishedPulling="2025-11-21 19:04:27.633339285 +0000 UTC m=+158.418479312" observedRunningTime="2025-11-21 19:04:28.830947137 +0000 UTC m=+159.616087154" watchObservedRunningTime="2025-11-21 19:04:28.833870803 +0000 UTC m=+159.619010830" Nov 21 19:04:28 crc kubenswrapper[4701]: I1121 19:04:28.848841 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-w6nbm" podStartSLOduration=4.054327911 podStartE2EDuration="1m1.848811436s" podCreationTimestamp="2025-11-21 19:03:27 +0000 UTC" firstStartedPulling="2025-11-21 19:03:29.930460109 +0000 UTC m=+100.715600136" lastFinishedPulling="2025-11-21 19:04:27.724943594 +0000 UTC m=+158.510083661" observedRunningTime="2025-11-21 19:04:28.847496272 +0000 UTC m=+159.632636299" watchObservedRunningTime="2025-11-21 19:04:28.848811436 +0000 UTC m=+159.633951463" Nov 21 19:04:29 crc kubenswrapper[4701]: I1121 19:04:29.286130 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-h6bck"] Nov 21 19:04:29 crc kubenswrapper[4701]: I1121 19:04:29.801192 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7xdws" event={"ID":"02015e4f-1f27-4004-8bc6-778e6db5fb94","Type":"ContainerStarted","Data":"e245b68a873c1dd1798a972fc20421b035bbae79eaefa9ae08f1be7978ef8b03"} Nov 21 19:04:29 crc kubenswrapper[4701]: I1121 19:04:29.816802 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r9glm" event={"ID":"dde062d0-393b-4b35-80ec-c3f67c2a5129","Type":"ContainerStarted","Data":"93adbc1daefc1c0518dcc2ee8b1d3b1e79d9c3ecc68f05ebc94ffc93503ee87d"} Nov 21 19:04:29 crc kubenswrapper[4701]: I1121 19:04:29.846290 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-r9glm" podStartSLOduration=4.129548821 podStartE2EDuration="1m5.846264783s" podCreationTimestamp="2025-11-21 19:03:24 +0000 UTC" firstStartedPulling="2025-11-21 19:03:27.483337044 +0000 UTC m=+98.268477071" lastFinishedPulling="2025-11-21 19:04:29.200052966 +0000 UTC m=+159.985193033" observedRunningTime="2025-11-21 19:04:29.844561729 +0000 UTC m=+160.629701756" watchObservedRunningTime="2025-11-21 19:04:29.846264783 +0000 UTC m=+160.631404810" Nov 21 19:04:30 crc kubenswrapper[4701]: I1121 19:04:30.823684 4701 generic.go:334] "Generic (PLEG): container finished" podID="02015e4f-1f27-4004-8bc6-778e6db5fb94" containerID="e245b68a873c1dd1798a972fc20421b035bbae79eaefa9ae08f1be7978ef8b03" exitCode=0 Nov 21 19:04:30 crc kubenswrapper[4701]: I1121 19:04:30.823758 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7xdws" event={"ID":"02015e4f-1f27-4004-8bc6-778e6db5fb94","Type":"ContainerDied","Data":"e245b68a873c1dd1798a972fc20421b035bbae79eaefa9ae08f1be7978ef8b03"} Nov 21 19:04:30 crc kubenswrapper[4701]: I1121 19:04:30.824988 4701 kuberuntime_container.go:808] "Killing 
container with a grace period" pod="openshift-marketplace/redhat-marketplace-h6bck" podUID="53624f72-bfb0-4be5-b00a-f06de73ae1f0" containerName="registry-server" containerID="cri-o://17084e9d101ff0f7819b10979944eb99c8210b852fc8b203197d7bc4a9e5af95" gracePeriod=2 Nov 21 19:04:31 crc kubenswrapper[4701]: I1121 19:04:31.833874 4701 generic.go:334] "Generic (PLEG): container finished" podID="53624f72-bfb0-4be5-b00a-f06de73ae1f0" containerID="17084e9d101ff0f7819b10979944eb99c8210b852fc8b203197d7bc4a9e5af95" exitCode=0 Nov 21 19:04:31 crc kubenswrapper[4701]: I1121 19:04:31.833954 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h6bck" event={"ID":"53624f72-bfb0-4be5-b00a-f06de73ae1f0","Type":"ContainerDied","Data":"17084e9d101ff0f7819b10979944eb99c8210b852fc8b203197d7bc4a9e5af95"} Nov 21 19:04:32 crc kubenswrapper[4701]: I1121 19:04:32.562579 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-h6bck" Nov 21 19:04:32 crc kubenswrapper[4701]: I1121 19:04:32.745645 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/53624f72-bfb0-4be5-b00a-f06de73ae1f0-utilities\") pod \"53624f72-bfb0-4be5-b00a-f06de73ae1f0\" (UID: \"53624f72-bfb0-4be5-b00a-f06de73ae1f0\") " Nov 21 19:04:32 crc kubenswrapper[4701]: I1121 19:04:32.745786 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/53624f72-bfb0-4be5-b00a-f06de73ae1f0-catalog-content\") pod \"53624f72-bfb0-4be5-b00a-f06de73ae1f0\" (UID: \"53624f72-bfb0-4be5-b00a-f06de73ae1f0\") " Nov 21 19:04:32 crc kubenswrapper[4701]: I1121 19:04:32.745874 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xsddx\" (UniqueName: \"kubernetes.io/projected/53624f72-bfb0-4be5-b00a-f06de73ae1f0-kube-api-access-xsddx\") pod \"53624f72-bfb0-4be5-b00a-f06de73ae1f0\" (UID: \"53624f72-bfb0-4be5-b00a-f06de73ae1f0\") " Nov 21 19:04:32 crc kubenswrapper[4701]: I1121 19:04:32.746552 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/53624f72-bfb0-4be5-b00a-f06de73ae1f0-utilities" (OuterVolumeSpecName: "utilities") pod "53624f72-bfb0-4be5-b00a-f06de73ae1f0" (UID: "53624f72-bfb0-4be5-b00a-f06de73ae1f0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:04:32 crc kubenswrapper[4701]: I1121 19:04:32.752587 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/53624f72-bfb0-4be5-b00a-f06de73ae1f0-kube-api-access-xsddx" (OuterVolumeSpecName: "kube-api-access-xsddx") pod "53624f72-bfb0-4be5-b00a-f06de73ae1f0" (UID: "53624f72-bfb0-4be5-b00a-f06de73ae1f0"). InnerVolumeSpecName "kube-api-access-xsddx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:04:32 crc kubenswrapper[4701]: I1121 19:04:32.768450 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/53624f72-bfb0-4be5-b00a-f06de73ae1f0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "53624f72-bfb0-4be5-b00a-f06de73ae1f0" (UID: "53624f72-bfb0-4be5-b00a-f06de73ae1f0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:04:32 crc kubenswrapper[4701]: I1121 19:04:32.844674 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h6bck" event={"ID":"53624f72-bfb0-4be5-b00a-f06de73ae1f0","Type":"ContainerDied","Data":"7af4cbae98316d2642a05aec1245ee44d12c583a6c22564712462da77d1ba019"} Nov 21 19:04:32 crc kubenswrapper[4701]: I1121 19:04:32.844737 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-h6bck" Nov 21 19:04:32 crc kubenswrapper[4701]: I1121 19:04:32.844760 4701 scope.go:117] "RemoveContainer" containerID="17084e9d101ff0f7819b10979944eb99c8210b852fc8b203197d7bc4a9e5af95" Nov 21 19:04:32 crc kubenswrapper[4701]: I1121 19:04:32.847154 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/53624f72-bfb0-4be5-b00a-f06de73ae1f0-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 19:04:32 crc kubenswrapper[4701]: I1121 19:04:32.847190 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/53624f72-bfb0-4be5-b00a-f06de73ae1f0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 19:04:32 crc kubenswrapper[4701]: I1121 19:04:32.847223 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xsddx\" (UniqueName: \"kubernetes.io/projected/53624f72-bfb0-4be5-b00a-f06de73ae1f0-kube-api-access-xsddx\") on node \"crc\" DevicePath \"\"" Nov 21 19:04:32 crc kubenswrapper[4701]: I1121 19:04:32.870082 4701 scope.go:117] "RemoveContainer" containerID="d13c7b733fd5da540c1b997440bbce022161a6f3d39b7fd2c81bd03f0c0bba31" Nov 21 19:04:32 crc kubenswrapper[4701]: I1121 19:04:32.879500 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-h6bck"] Nov 21 19:04:32 crc kubenswrapper[4701]: I1121 19:04:32.881774 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-h6bck"] Nov 21 19:04:32 crc kubenswrapper[4701]: I1121 19:04:32.901469 4701 scope.go:117] "RemoveContainer" containerID="83d7a42597c2e260bf3ad823747c3449bffc7086aa83c3c9d84f684f8ae31228" Nov 21 19:04:33 crc kubenswrapper[4701]: I1121 19:04:33.854782 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7xdws" event={"ID":"02015e4f-1f27-4004-8bc6-778e6db5fb94","Type":"ContainerStarted","Data":"82cf5cbed717bfbe26c2b8bc8eab65535b1445e6eee9b2a2849f565fb584e996"} Nov 21 19:04:33 crc kubenswrapper[4701]: I1121 19:04:33.878616 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-7xdws" podStartSLOduration=4.371719646 podStartE2EDuration="1m9.87859067s" podCreationTimestamp="2025-11-21 19:03:24 +0000 UTC" firstStartedPulling="2025-11-21 19:03:27.462300744 +0000 UTC m=+98.247440771" lastFinishedPulling="2025-11-21 19:04:32.969171768 +0000 UTC m=+163.754311795" observedRunningTime="2025-11-21 19:04:33.876845454 +0000 UTC m=+164.661985481" watchObservedRunningTime="2025-11-21 19:04:33.87859067 +0000 UTC m=+164.663730697" Nov 21 19:04:33 crc kubenswrapper[4701]: I1121 19:04:33.958400 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="53624f72-bfb0-4be5-b00a-f06de73ae1f0" path="/var/lib/kubelet/pods/53624f72-bfb0-4be5-b00a-f06de73ae1f0/volumes" Nov 21 19:04:34 crc kubenswrapper[4701]: I1121 19:04:34.499226 4701 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-nkl92" Nov 21 19:04:34 crc kubenswrapper[4701]: I1121 19:04:34.657421 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-r9glm" Nov 21 19:04:34 crc kubenswrapper[4701]: I1121 19:04:34.657475 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-r9glm" Nov 21 19:04:34 crc kubenswrapper[4701]: I1121 19:04:34.703717 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-r9glm" Nov 21 19:04:34 crc kubenswrapper[4701]: I1121 19:04:34.821181 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-xjpsq" Nov 21 19:04:34 crc kubenswrapper[4701]: I1121 19:04:34.915927 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-7xdws" Nov 21 19:04:34 crc kubenswrapper[4701]: I1121 19:04:34.915983 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-7xdws" Nov 21 19:04:34 crc kubenswrapper[4701]: I1121 19:04:34.933987 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-r9glm" Nov 21 19:04:35 crc kubenswrapper[4701]: I1121 19:04:35.967162 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-7xdws" podUID="02015e4f-1f27-4004-8bc6-778e6db5fb94" containerName="registry-server" probeResult="failure" output=< Nov 21 19:04:35 crc kubenswrapper[4701]: timeout: failed to connect service ":50051" within 1s Nov 21 19:04:35 crc kubenswrapper[4701]: > Nov 21 19:04:36 crc kubenswrapper[4701]: I1121 19:04:36.580748 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-rcsbh" Nov 21 19:04:36 crc kubenswrapper[4701]: I1121 19:04:36.581133 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-rcsbh" Nov 21 19:04:36 crc kubenswrapper[4701]: I1121 19:04:36.629020 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-rcsbh" Nov 21 19:04:36 crc kubenswrapper[4701]: I1121 19:04:36.941227 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-rcsbh" Nov 21 19:04:37 crc kubenswrapper[4701]: I1121 19:04:37.593964 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-w6nbm" Nov 21 19:04:37 crc kubenswrapper[4701]: I1121 19:04:37.594047 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-w6nbm" Nov 21 19:04:37 crc kubenswrapper[4701]: I1121 19:04:37.650731 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-w6nbm" Nov 21 19:04:37 crc kubenswrapper[4701]: I1121 19:04:37.683769 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xjpsq"] Nov 21 19:04:37 crc kubenswrapper[4701]: I1121 19:04:37.684127 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-xjpsq" 
podUID="4b7dd16f-9a0f-406f-84b4-cc94baf5405c" containerName="registry-server" containerID="cri-o://626ccb54ea122ed7f9ddbb4cd1e6314a69bc5cbcd254f67384cbe086c405dfc5" gracePeriod=2 Nov 21 19:04:37 crc kubenswrapper[4701]: I1121 19:04:37.965520 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-w6nbm" Nov 21 19:04:38 crc kubenswrapper[4701]: I1121 19:04:38.062978 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-hc62d" Nov 21 19:04:38 crc kubenswrapper[4701]: I1121 19:04:38.063045 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-hc62d" Nov 21 19:04:38 crc kubenswrapper[4701]: I1121 19:04:38.109235 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-hc62d" Nov 21 19:04:38 crc kubenswrapper[4701]: I1121 19:04:38.937477 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-hc62d" Nov 21 19:04:39 crc kubenswrapper[4701]: I1121 19:04:39.902040 4701 generic.go:334] "Generic (PLEG): container finished" podID="4b7dd16f-9a0f-406f-84b4-cc94baf5405c" containerID="626ccb54ea122ed7f9ddbb4cd1e6314a69bc5cbcd254f67384cbe086c405dfc5" exitCode=0 Nov 21 19:04:39 crc kubenswrapper[4701]: I1121 19:04:39.902122 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xjpsq" event={"ID":"4b7dd16f-9a0f-406f-84b4-cc94baf5405c","Type":"ContainerDied","Data":"626ccb54ea122ed7f9ddbb4cd1e6314a69bc5cbcd254f67384cbe086c405dfc5"} Nov 21 19:04:39 crc kubenswrapper[4701]: I1121 19:04:39.996594 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xjpsq" Nov 21 19:04:40 crc kubenswrapper[4701]: I1121 19:04:40.098957 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ddzdt\" (UniqueName: \"kubernetes.io/projected/4b7dd16f-9a0f-406f-84b4-cc94baf5405c-kube-api-access-ddzdt\") pod \"4b7dd16f-9a0f-406f-84b4-cc94baf5405c\" (UID: \"4b7dd16f-9a0f-406f-84b4-cc94baf5405c\") " Nov 21 19:04:40 crc kubenswrapper[4701]: I1121 19:04:40.099060 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b7dd16f-9a0f-406f-84b4-cc94baf5405c-utilities\") pod \"4b7dd16f-9a0f-406f-84b4-cc94baf5405c\" (UID: \"4b7dd16f-9a0f-406f-84b4-cc94baf5405c\") " Nov 21 19:04:40 crc kubenswrapper[4701]: I1121 19:04:40.099118 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b7dd16f-9a0f-406f-84b4-cc94baf5405c-catalog-content\") pod \"4b7dd16f-9a0f-406f-84b4-cc94baf5405c\" (UID: \"4b7dd16f-9a0f-406f-84b4-cc94baf5405c\") " Nov 21 19:04:40 crc kubenswrapper[4701]: I1121 19:04:40.100257 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b7dd16f-9a0f-406f-84b4-cc94baf5405c-utilities" (OuterVolumeSpecName: "utilities") pod "4b7dd16f-9a0f-406f-84b4-cc94baf5405c" (UID: "4b7dd16f-9a0f-406f-84b4-cc94baf5405c"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:04:40 crc kubenswrapper[4701]: I1121 19:04:40.107284 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b7dd16f-9a0f-406f-84b4-cc94baf5405c-kube-api-access-ddzdt" (OuterVolumeSpecName: "kube-api-access-ddzdt") pod "4b7dd16f-9a0f-406f-84b4-cc94baf5405c" (UID: "4b7dd16f-9a0f-406f-84b4-cc94baf5405c"). InnerVolumeSpecName "kube-api-access-ddzdt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:04:40 crc kubenswrapper[4701]: I1121 19:04:40.154062 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b7dd16f-9a0f-406f-84b4-cc94baf5405c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4b7dd16f-9a0f-406f-84b4-cc94baf5405c" (UID: "4b7dd16f-9a0f-406f-84b4-cc94baf5405c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:04:40 crc kubenswrapper[4701]: I1121 19:04:40.200649 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b7dd16f-9a0f-406f-84b4-cc94baf5405c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 19:04:40 crc kubenswrapper[4701]: I1121 19:04:40.200970 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ddzdt\" (UniqueName: \"kubernetes.io/projected/4b7dd16f-9a0f-406f-84b4-cc94baf5405c-kube-api-access-ddzdt\") on node \"crc\" DevicePath \"\"" Nov 21 19:04:40 crc kubenswrapper[4701]: I1121 19:04:40.201041 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b7dd16f-9a0f-406f-84b4-cc94baf5405c-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 19:04:40 crc kubenswrapper[4701]: I1121 19:04:40.690982 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-hc62d"] Nov 21 19:04:40 crc kubenswrapper[4701]: I1121 19:04:40.913634 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xjpsq" event={"ID":"4b7dd16f-9a0f-406f-84b4-cc94baf5405c","Type":"ContainerDied","Data":"07bce4eb0850245b385a494d55391adfd9b954e0a1d1227506fabd7851769060"} Nov 21 19:04:40 crc kubenswrapper[4701]: I1121 19:04:40.913714 4701 scope.go:117] "RemoveContainer" containerID="626ccb54ea122ed7f9ddbb4cd1e6314a69bc5cbcd254f67384cbe086c405dfc5" Nov 21 19:04:40 crc kubenswrapper[4701]: I1121 19:04:40.913718 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-xjpsq" Nov 21 19:04:40 crc kubenswrapper[4701]: I1121 19:04:40.913852 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-hc62d" podUID="5d0f6c12-cd85-4473-812f-d8fcffb2742e" containerName="registry-server" containerID="cri-o://034c57d32f58351b20e0aef516b94a474e4e0c3f49d91e6162e4b2199c2ab2ac" gracePeriod=2 Nov 21 19:04:40 crc kubenswrapper[4701]: I1121 19:04:40.938110 4701 scope.go:117] "RemoveContainer" containerID="bb3ae36789b70877087f91f129172dd05d6ada2891ad1e4d3ea3051741bf2b4f" Nov 21 19:04:40 crc kubenswrapper[4701]: I1121 19:04:40.962759 4701 scope.go:117] "RemoveContainer" containerID="0a052ec5790244fa183eec3679f81c5c3d55a9049d96ef99f670800f267fbfeb" Nov 21 19:04:40 crc kubenswrapper[4701]: I1121 19:04:40.970756 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xjpsq"] Nov 21 19:04:40 crc kubenswrapper[4701]: I1121 19:04:40.983390 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-xjpsq"] Nov 21 19:04:41 crc kubenswrapper[4701]: I1121 19:04:41.933447 4701 generic.go:334] "Generic (PLEG): container finished" podID="5d0f6c12-cd85-4473-812f-d8fcffb2742e" containerID="034c57d32f58351b20e0aef516b94a474e4e0c3f49d91e6162e4b2199c2ab2ac" exitCode=0 Nov 21 19:04:41 crc kubenswrapper[4701]: I1121 19:04:41.933528 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hc62d" event={"ID":"5d0f6c12-cd85-4473-812f-d8fcffb2742e","Type":"ContainerDied","Data":"034c57d32f58351b20e0aef516b94a474e4e0c3f49d91e6162e4b2199c2ab2ac"} Nov 21 19:04:41 crc kubenswrapper[4701]: I1121 19:04:41.988910 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b7dd16f-9a0f-406f-84b4-cc94baf5405c" path="/var/lib/kubelet/pods/4b7dd16f-9a0f-406f-84b4-cc94baf5405c/volumes" Nov 21 19:04:42 crc kubenswrapper[4701]: I1121 19:04:42.041155 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-hc62d" Nov 21 19:04:42 crc kubenswrapper[4701]: I1121 19:04:42.129380 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5d0f6c12-cd85-4473-812f-d8fcffb2742e-utilities\") pod \"5d0f6c12-cd85-4473-812f-d8fcffb2742e\" (UID: \"5d0f6c12-cd85-4473-812f-d8fcffb2742e\") " Nov 21 19:04:42 crc kubenswrapper[4701]: I1121 19:04:42.129428 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5d0f6c12-cd85-4473-812f-d8fcffb2742e-catalog-content\") pod \"5d0f6c12-cd85-4473-812f-d8fcffb2742e\" (UID: \"5d0f6c12-cd85-4473-812f-d8fcffb2742e\") " Nov 21 19:04:42 crc kubenswrapper[4701]: I1121 19:04:42.129512 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8lvcb\" (UniqueName: \"kubernetes.io/projected/5d0f6c12-cd85-4473-812f-d8fcffb2742e-kube-api-access-8lvcb\") pod \"5d0f6c12-cd85-4473-812f-d8fcffb2742e\" (UID: \"5d0f6c12-cd85-4473-812f-d8fcffb2742e\") " Nov 21 19:04:42 crc kubenswrapper[4701]: I1121 19:04:42.130484 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5d0f6c12-cd85-4473-812f-d8fcffb2742e-utilities" (OuterVolumeSpecName: "utilities") pod "5d0f6c12-cd85-4473-812f-d8fcffb2742e" (UID: "5d0f6c12-cd85-4473-812f-d8fcffb2742e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:04:42 crc kubenswrapper[4701]: I1121 19:04:42.136419 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d0f6c12-cd85-4473-812f-d8fcffb2742e-kube-api-access-8lvcb" (OuterVolumeSpecName: "kube-api-access-8lvcb") pod "5d0f6c12-cd85-4473-812f-d8fcffb2742e" (UID: "5d0f6c12-cd85-4473-812f-d8fcffb2742e"). InnerVolumeSpecName "kube-api-access-8lvcb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:04:42 crc kubenswrapper[4701]: I1121 19:04:42.231642 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8lvcb\" (UniqueName: \"kubernetes.io/projected/5d0f6c12-cd85-4473-812f-d8fcffb2742e-kube-api-access-8lvcb\") on node \"crc\" DevicePath \"\"" Nov 21 19:04:42 crc kubenswrapper[4701]: I1121 19:04:42.232060 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5d0f6c12-cd85-4473-812f-d8fcffb2742e-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 19:04:42 crc kubenswrapper[4701]: I1121 19:04:42.241003 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5d0f6c12-cd85-4473-812f-d8fcffb2742e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5d0f6c12-cd85-4473-812f-d8fcffb2742e" (UID: "5d0f6c12-cd85-4473-812f-d8fcffb2742e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:04:42 crc kubenswrapper[4701]: I1121 19:04:42.333080 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5d0f6c12-cd85-4473-812f-d8fcffb2742e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 19:04:42 crc kubenswrapper[4701]: I1121 19:04:42.944217 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hc62d" event={"ID":"5d0f6c12-cd85-4473-812f-d8fcffb2742e","Type":"ContainerDied","Data":"3bf8bb4d8699d7285972e92a980070ff69cfa333484eb3a55117a4638cda1341"} Nov 21 19:04:42 crc kubenswrapper[4701]: I1121 19:04:42.944279 4701 scope.go:117] "RemoveContainer" containerID="034c57d32f58351b20e0aef516b94a474e4e0c3f49d91e6162e4b2199c2ab2ac" Nov 21 19:04:42 crc kubenswrapper[4701]: I1121 19:04:42.944294 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hc62d" Nov 21 19:04:42 crc kubenswrapper[4701]: I1121 19:04:42.979658 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-hc62d"] Nov 21 19:04:42 crc kubenswrapper[4701]: I1121 19:04:42.982674 4701 scope.go:117] "RemoveContainer" containerID="53beb9fd86c3c396974b6bc3cdcab587b4842775ced915ae1c56c9581c663aa1" Nov 21 19:04:42 crc kubenswrapper[4701]: I1121 19:04:42.985021 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-hc62d"] Nov 21 19:04:43 crc kubenswrapper[4701]: I1121 19:04:43.004472 4701 scope.go:117] "RemoveContainer" containerID="889193609ef8f3e0d4011ac8ba850b69c84580200c47ace25c329f3612453bfc" Nov 21 19:04:43 crc kubenswrapper[4701]: I1121 19:04:43.963417 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5d0f6c12-cd85-4473-812f-d8fcffb2742e" path="/var/lib/kubelet/pods/5d0f6c12-cd85-4473-812f-d8fcffb2742e/volumes" Nov 21 19:04:44 crc kubenswrapper[4701]: I1121 19:04:44.967948 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-7xdws" Nov 21 19:04:45 crc kubenswrapper[4701]: I1121 19:04:45.018473 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-7xdws" Nov 21 19:04:48 crc kubenswrapper[4701]: I1121 19:04:48.089126 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7xdws"] Nov 21 19:04:48 crc kubenswrapper[4701]: I1121 19:04:48.089484 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-7xdws" podUID="02015e4f-1f27-4004-8bc6-778e6db5fb94" containerName="registry-server" containerID="cri-o://82cf5cbed717bfbe26c2b8bc8eab65535b1445e6eee9b2a2849f565fb584e996" gracePeriod=2 Nov 21 19:04:48 crc kubenswrapper[4701]: I1121 19:04:48.569964 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-7xdws" Nov 21 19:04:48 crc kubenswrapper[4701]: I1121 19:04:48.614245 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 19:04:48 crc kubenswrapper[4701]: I1121 19:04:48.614370 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 19:04:48 crc kubenswrapper[4701]: I1121 19:04:48.639979 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/02015e4f-1f27-4004-8bc6-778e6db5fb94-utilities\") pod \"02015e4f-1f27-4004-8bc6-778e6db5fb94\" (UID: \"02015e4f-1f27-4004-8bc6-778e6db5fb94\") " Nov 21 19:04:48 crc kubenswrapper[4701]: I1121 19:04:48.640183 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/02015e4f-1f27-4004-8bc6-778e6db5fb94-catalog-content\") pod \"02015e4f-1f27-4004-8bc6-778e6db5fb94\" (UID: \"02015e4f-1f27-4004-8bc6-778e6db5fb94\") " Nov 21 19:04:48 crc kubenswrapper[4701]: I1121 19:04:48.640402 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ppk7z\" (UniqueName: \"kubernetes.io/projected/02015e4f-1f27-4004-8bc6-778e6db5fb94-kube-api-access-ppk7z\") pod \"02015e4f-1f27-4004-8bc6-778e6db5fb94\" (UID: \"02015e4f-1f27-4004-8bc6-778e6db5fb94\") " Nov 21 19:04:48 crc kubenswrapper[4701]: I1121 19:04:48.640969 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/02015e4f-1f27-4004-8bc6-778e6db5fb94-utilities" (OuterVolumeSpecName: "utilities") pod "02015e4f-1f27-4004-8bc6-778e6db5fb94" (UID: "02015e4f-1f27-4004-8bc6-778e6db5fb94"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:04:48 crc kubenswrapper[4701]: I1121 19:04:48.641119 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/02015e4f-1f27-4004-8bc6-778e6db5fb94-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 19:04:48 crc kubenswrapper[4701]: I1121 19:04:48.648844 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/02015e4f-1f27-4004-8bc6-778e6db5fb94-kube-api-access-ppk7z" (OuterVolumeSpecName: "kube-api-access-ppk7z") pod "02015e4f-1f27-4004-8bc6-778e6db5fb94" (UID: "02015e4f-1f27-4004-8bc6-778e6db5fb94"). InnerVolumeSpecName "kube-api-access-ppk7z". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:04:48 crc kubenswrapper[4701]: I1121 19:04:48.742837 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ppk7z\" (UniqueName: \"kubernetes.io/projected/02015e4f-1f27-4004-8bc6-778e6db5fb94-kube-api-access-ppk7z\") on node \"crc\" DevicePath \"\"" Nov 21 19:04:48 crc kubenswrapper[4701]: I1121 19:04:48.746641 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/02015e4f-1f27-4004-8bc6-778e6db5fb94-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "02015e4f-1f27-4004-8bc6-778e6db5fb94" (UID: "02015e4f-1f27-4004-8bc6-778e6db5fb94"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:04:48 crc kubenswrapper[4701]: I1121 19:04:48.844842 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/02015e4f-1f27-4004-8bc6-778e6db5fb94-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 19:04:48 crc kubenswrapper[4701]: I1121 19:04:48.996007 4701 generic.go:334] "Generic (PLEG): container finished" podID="02015e4f-1f27-4004-8bc6-778e6db5fb94" containerID="82cf5cbed717bfbe26c2b8bc8eab65535b1445e6eee9b2a2849f565fb584e996" exitCode=0 Nov 21 19:04:48 crc kubenswrapper[4701]: I1121 19:04:48.996071 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7xdws" event={"ID":"02015e4f-1f27-4004-8bc6-778e6db5fb94","Type":"ContainerDied","Data":"82cf5cbed717bfbe26c2b8bc8eab65535b1445e6eee9b2a2849f565fb584e996"} Nov 21 19:04:48 crc kubenswrapper[4701]: I1121 19:04:48.996091 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7xdws" Nov 21 19:04:48 crc kubenswrapper[4701]: I1121 19:04:48.996113 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7xdws" event={"ID":"02015e4f-1f27-4004-8bc6-778e6db5fb94","Type":"ContainerDied","Data":"75b888e6efa5045f31611006d4690008362bb37ca541e3c2b01ad1726894bb6d"} Nov 21 19:04:48 crc kubenswrapper[4701]: I1121 19:04:48.996145 4701 scope.go:117] "RemoveContainer" containerID="82cf5cbed717bfbe26c2b8bc8eab65535b1445e6eee9b2a2849f565fb584e996" Nov 21 19:04:49 crc kubenswrapper[4701]: I1121 19:04:49.022022 4701 scope.go:117] "RemoveContainer" containerID="e245b68a873c1dd1798a972fc20421b035bbae79eaefa9ae08f1be7978ef8b03" Nov 21 19:04:49 crc kubenswrapper[4701]: I1121 19:04:49.037648 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7xdws"] Nov 21 19:04:49 crc kubenswrapper[4701]: I1121 19:04:49.043605 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-7xdws"] Nov 21 19:04:49 crc kubenswrapper[4701]: I1121 19:04:49.064262 4701 scope.go:117] "RemoveContainer" containerID="db11f2fe6a88b2a53c4448925d4c6aecdec09a93b14e8a41786dfbf12b884684" Nov 21 19:04:49 crc kubenswrapper[4701]: I1121 19:04:49.083655 4701 scope.go:117] "RemoveContainer" containerID="82cf5cbed717bfbe26c2b8bc8eab65535b1445e6eee9b2a2849f565fb584e996" Nov 21 19:04:49 crc kubenswrapper[4701]: E1121 19:04:49.085493 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"82cf5cbed717bfbe26c2b8bc8eab65535b1445e6eee9b2a2849f565fb584e996\": container with ID starting with 
82cf5cbed717bfbe26c2b8bc8eab65535b1445e6eee9b2a2849f565fb584e996 not found: ID does not exist" containerID="82cf5cbed717bfbe26c2b8bc8eab65535b1445e6eee9b2a2849f565fb584e996" Nov 21 19:04:49 crc kubenswrapper[4701]: I1121 19:04:49.085587 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"82cf5cbed717bfbe26c2b8bc8eab65535b1445e6eee9b2a2849f565fb584e996"} err="failed to get container status \"82cf5cbed717bfbe26c2b8bc8eab65535b1445e6eee9b2a2849f565fb584e996\": rpc error: code = NotFound desc = could not find container \"82cf5cbed717bfbe26c2b8bc8eab65535b1445e6eee9b2a2849f565fb584e996\": container with ID starting with 82cf5cbed717bfbe26c2b8bc8eab65535b1445e6eee9b2a2849f565fb584e996 not found: ID does not exist" Nov 21 19:04:49 crc kubenswrapper[4701]: I1121 19:04:49.085678 4701 scope.go:117] "RemoveContainer" containerID="e245b68a873c1dd1798a972fc20421b035bbae79eaefa9ae08f1be7978ef8b03" Nov 21 19:04:49 crc kubenswrapper[4701]: E1121 19:04:49.087718 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e245b68a873c1dd1798a972fc20421b035bbae79eaefa9ae08f1be7978ef8b03\": container with ID starting with e245b68a873c1dd1798a972fc20421b035bbae79eaefa9ae08f1be7978ef8b03 not found: ID does not exist" containerID="e245b68a873c1dd1798a972fc20421b035bbae79eaefa9ae08f1be7978ef8b03" Nov 21 19:04:49 crc kubenswrapper[4701]: I1121 19:04:49.087810 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e245b68a873c1dd1798a972fc20421b035bbae79eaefa9ae08f1be7978ef8b03"} err="failed to get container status \"e245b68a873c1dd1798a972fc20421b035bbae79eaefa9ae08f1be7978ef8b03\": rpc error: code = NotFound desc = could not find container \"e245b68a873c1dd1798a972fc20421b035bbae79eaefa9ae08f1be7978ef8b03\": container with ID starting with e245b68a873c1dd1798a972fc20421b035bbae79eaefa9ae08f1be7978ef8b03 not found: ID does not exist" Nov 21 19:04:49 crc kubenswrapper[4701]: I1121 19:04:49.087870 4701 scope.go:117] "RemoveContainer" containerID="db11f2fe6a88b2a53c4448925d4c6aecdec09a93b14e8a41786dfbf12b884684" Nov 21 19:04:49 crc kubenswrapper[4701]: E1121 19:04:49.088456 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"db11f2fe6a88b2a53c4448925d4c6aecdec09a93b14e8a41786dfbf12b884684\": container with ID starting with db11f2fe6a88b2a53c4448925d4c6aecdec09a93b14e8a41786dfbf12b884684 not found: ID does not exist" containerID="db11f2fe6a88b2a53c4448925d4c6aecdec09a93b14e8a41786dfbf12b884684" Nov 21 19:04:49 crc kubenswrapper[4701]: I1121 19:04:49.088537 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db11f2fe6a88b2a53c4448925d4c6aecdec09a93b14e8a41786dfbf12b884684"} err="failed to get container status \"db11f2fe6a88b2a53c4448925d4c6aecdec09a93b14e8a41786dfbf12b884684\": rpc error: code = NotFound desc = could not find container \"db11f2fe6a88b2a53c4448925d4c6aecdec09a93b14e8a41786dfbf12b884684\": container with ID starting with db11f2fe6a88b2a53c4448925d4c6aecdec09a93b14e8a41786dfbf12b884684 not found: ID does not exist" Nov 21 19:04:49 crc kubenswrapper[4701]: I1121 19:04:49.986915 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="02015e4f-1f27-4004-8bc6-778e6db5fb94" path="/var/lib/kubelet/pods/02015e4f-1f27-4004-8bc6-778e6db5fb94/volumes" Nov 21 19:04:53 crc kubenswrapper[4701]: I1121 19:04:53.703752 
4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" podUID="a2f5b911-dc9c-4009-a4b0-da201a34f156" containerName="oauth-openshift" containerID="cri-o://7c2c493ff54092ed7869db47f255b23f0ac1377ef540066b81c93355a0bd42d9" gracePeriod=15 Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.065047 4701 generic.go:334] "Generic (PLEG): container finished" podID="a2f5b911-dc9c-4009-a4b0-da201a34f156" containerID="7c2c493ff54092ed7869db47f255b23f0ac1377ef540066b81c93355a0bd42d9" exitCode=0 Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.065111 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" event={"ID":"a2f5b911-dc9c-4009-a4b0-da201a34f156","Type":"ContainerDied","Data":"7c2c493ff54092ed7869db47f255b23f0ac1377ef540066b81c93355a0bd42d9"} Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.148054 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.250859 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-serving-cert\") pod \"a2f5b911-dc9c-4009-a4b0-da201a34f156\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.251463 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-user-idp-0-file-data\") pod \"a2f5b911-dc9c-4009-a4b0-da201a34f156\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.251747 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a2f5b911-dc9c-4009-a4b0-da201a34f156-audit-dir\") pod \"a2f5b911-dc9c-4009-a4b0-da201a34f156\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.251914 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a2f5b911-dc9c-4009-a4b0-da201a34f156-audit-policies\") pod \"a2f5b911-dc9c-4009-a4b0-da201a34f156\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.252137 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-router-certs\") pod \"a2f5b911-dc9c-4009-a4b0-da201a34f156\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.251929 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a2f5b911-dc9c-4009-a4b0-da201a34f156-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "a2f5b911-dc9c-4009-a4b0-da201a34f156" (UID: "a2f5b911-dc9c-4009-a4b0-da201a34f156"). InnerVolumeSpecName "audit-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.253246 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a2f5b911-dc9c-4009-a4b0-da201a34f156-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "a2f5b911-dc9c-4009-a4b0-da201a34f156" (UID: "a2f5b911-dc9c-4009-a4b0-da201a34f156"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.253256 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "a2f5b911-dc9c-4009-a4b0-da201a34f156" (UID: "a2f5b911-dc9c-4009-a4b0-da201a34f156"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.253703 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-cliconfig\") pod \"a2f5b911-dc9c-4009-a4b0-da201a34f156\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.253923 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-user-template-error\") pod \"a2f5b911-dc9c-4009-a4b0-da201a34f156\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.254080 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-user-template-login\") pod \"a2f5b911-dc9c-4009-a4b0-da201a34f156\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.255079 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-service-ca\") pod \"a2f5b911-dc9c-4009-a4b0-da201a34f156\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.256730 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-user-template-provider-selection\") pod \"a2f5b911-dc9c-4009-a4b0-da201a34f156\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.257013 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-session\") pod \"a2f5b911-dc9c-4009-a4b0-da201a34f156\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.257184 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: 
\"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-ocp-branding-template\") pod \"a2f5b911-dc9c-4009-a4b0-da201a34f156\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.257381 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rt2mw\" (UniqueName: \"kubernetes.io/projected/a2f5b911-dc9c-4009-a4b0-da201a34f156-kube-api-access-rt2mw\") pod \"a2f5b911-dc9c-4009-a4b0-da201a34f156\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.257581 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-trusted-ca-bundle\") pod \"a2f5b911-dc9c-4009-a4b0-da201a34f156\" (UID: \"a2f5b911-dc9c-4009-a4b0-da201a34f156\") " Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.258263 4701 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a2f5b911-dc9c-4009-a4b0-da201a34f156-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.258436 4701 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a2f5b911-dc9c-4009-a4b0-da201a34f156-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.258568 4701 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.257634 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "a2f5b911-dc9c-4009-a4b0-da201a34f156" (UID: "a2f5b911-dc9c-4009-a4b0-da201a34f156"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.259095 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "a2f5b911-dc9c-4009-a4b0-da201a34f156" (UID: "a2f5b911-dc9c-4009-a4b0-da201a34f156"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.264066 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "a2f5b911-dc9c-4009-a4b0-da201a34f156" (UID: "a2f5b911-dc9c-4009-a4b0-da201a34f156"). InnerVolumeSpecName "v4-0-config-system-router-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.265308 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "a2f5b911-dc9c-4009-a4b0-da201a34f156" (UID: "a2f5b911-dc9c-4009-a4b0-da201a34f156"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.266161 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "a2f5b911-dc9c-4009-a4b0-da201a34f156" (UID: "a2f5b911-dc9c-4009-a4b0-da201a34f156"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.267314 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "a2f5b911-dc9c-4009-a4b0-da201a34f156" (UID: "a2f5b911-dc9c-4009-a4b0-da201a34f156"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.267877 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "a2f5b911-dc9c-4009-a4b0-da201a34f156" (UID: "a2f5b911-dc9c-4009-a4b0-da201a34f156"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.268277 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "a2f5b911-dc9c-4009-a4b0-da201a34f156" (UID: "a2f5b911-dc9c-4009-a4b0-da201a34f156"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.268509 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a2f5b911-dc9c-4009-a4b0-da201a34f156-kube-api-access-rt2mw" (OuterVolumeSpecName: "kube-api-access-rt2mw") pod "a2f5b911-dc9c-4009-a4b0-da201a34f156" (UID: "a2f5b911-dc9c-4009-a4b0-da201a34f156"). InnerVolumeSpecName "kube-api-access-rt2mw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.268531 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "a2f5b911-dc9c-4009-a4b0-da201a34f156" (UID: "a2f5b911-dc9c-4009-a4b0-da201a34f156"). InnerVolumeSpecName "v4-0-config-user-template-error". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.269239 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "a2f5b911-dc9c-4009-a4b0-da201a34f156" (UID: "a2f5b911-dc9c-4009-a4b0-da201a34f156"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.360669 4701 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.360755 4701 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.360785 4701 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.360810 4701 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.360831 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rt2mw\" (UniqueName: \"kubernetes.io/projected/a2f5b911-dc9c-4009-a4b0-da201a34f156-kube-api-access-rt2mw\") on node \"crc\" DevicePath \"\"" Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.360855 4701 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.360874 4701 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.360894 4701 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.360916 4701 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.360935 4701 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-user-template-login\") 
on node \"crc\" DevicePath \"\"" Nov 21 19:04:54 crc kubenswrapper[4701]: I1121 19:04:54.360955 4701 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a2f5b911-dc9c-4009-a4b0-da201a34f156-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 21 19:04:55 crc kubenswrapper[4701]: I1121 19:04:55.075039 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" event={"ID":"a2f5b911-dc9c-4009-a4b0-da201a34f156","Type":"ContainerDied","Data":"134ce6681c8b82bb4948b14c900dadacc393dbabf38ee53e550957af05842067"} Nov 21 19:04:55 crc kubenswrapper[4701]: I1121 19:04:55.075118 4701 scope.go:117] "RemoveContainer" containerID="7c2c493ff54092ed7869db47f255b23f0ac1377ef540066b81c93355a0bd42d9" Nov 21 19:04:55 crc kubenswrapper[4701]: I1121 19:04:55.075146 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-twmdp" Nov 21 19:04:55 crc kubenswrapper[4701]: I1121 19:04:55.126124 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-twmdp"] Nov 21 19:04:55 crc kubenswrapper[4701]: I1121 19:04:55.131560 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-twmdp"] Nov 21 19:04:55 crc kubenswrapper[4701]: I1121 19:04:55.965317 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a2f5b911-dc9c-4009-a4b0-da201a34f156" path="/var/lib/kubelet/pods/a2f5b911-dc9c-4009-a4b0-da201a34f156/volumes" Nov 21 19:04:57 crc kubenswrapper[4701]: I1121 19:04:57.876627 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.255025 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-5494594499-4w6d2"] Nov 21 19:05:02 crc kubenswrapper[4701]: E1121 19:05:02.256183 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d0f6c12-cd85-4473-812f-d8fcffb2742e" containerName="extract-utilities" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.256232 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d0f6c12-cd85-4473-812f-d8fcffb2742e" containerName="extract-utilities" Nov 21 19:05:02 crc kubenswrapper[4701]: E1121 19:05:02.256252 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2f5b911-dc9c-4009-a4b0-da201a34f156" containerName="oauth-openshift" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.256266 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2f5b911-dc9c-4009-a4b0-da201a34f156" containerName="oauth-openshift" Nov 21 19:05:02 crc kubenswrapper[4701]: E1121 19:05:02.256285 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="276e2cb3-e02e-4122-b10b-a454198b7954" containerName="collect-profiles" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.256299 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="276e2cb3-e02e-4122-b10b-a454198b7954" containerName="collect-profiles" Nov 21 19:05:02 crc kubenswrapper[4701]: E1121 19:05:02.256318 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b7dd16f-9a0f-406f-84b4-cc94baf5405c" containerName="extract-content" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.256331 4701 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="4b7dd16f-9a0f-406f-84b4-cc94baf5405c" containerName="extract-content" Nov 21 19:05:02 crc kubenswrapper[4701]: E1121 19:05:02.256349 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d0f6c12-cd85-4473-812f-d8fcffb2742e" containerName="registry-server" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.256363 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d0f6c12-cd85-4473-812f-d8fcffb2742e" containerName="registry-server" Nov 21 19:05:02 crc kubenswrapper[4701]: E1121 19:05:02.256379 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02015e4f-1f27-4004-8bc6-778e6db5fb94" containerName="extract-content" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.256391 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="02015e4f-1f27-4004-8bc6-778e6db5fb94" containerName="extract-content" Nov 21 19:05:02 crc kubenswrapper[4701]: E1121 19:05:02.256406 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53624f72-bfb0-4be5-b00a-f06de73ae1f0" containerName="extract-utilities" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.256418 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="53624f72-bfb0-4be5-b00a-f06de73ae1f0" containerName="extract-utilities" Nov 21 19:05:02 crc kubenswrapper[4701]: E1121 19:05:02.256443 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02015e4f-1f27-4004-8bc6-778e6db5fb94" containerName="extract-utilities" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.256455 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="02015e4f-1f27-4004-8bc6-778e6db5fb94" containerName="extract-utilities" Nov 21 19:05:02 crc kubenswrapper[4701]: E1121 19:05:02.256472 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53624f72-bfb0-4be5-b00a-f06de73ae1f0" containerName="registry-server" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.256484 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="53624f72-bfb0-4be5-b00a-f06de73ae1f0" containerName="registry-server" Nov 21 19:05:02 crc kubenswrapper[4701]: E1121 19:05:02.256503 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02015e4f-1f27-4004-8bc6-778e6db5fb94" containerName="registry-server" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.256517 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="02015e4f-1f27-4004-8bc6-778e6db5fb94" containerName="registry-server" Nov 21 19:05:02 crc kubenswrapper[4701]: E1121 19:05:02.256534 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b7dd16f-9a0f-406f-84b4-cc94baf5405c" containerName="extract-utilities" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.256547 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b7dd16f-9a0f-406f-84b4-cc94baf5405c" containerName="extract-utilities" Nov 21 19:05:02 crc kubenswrapper[4701]: E1121 19:05:02.256560 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d0f6c12-cd85-4473-812f-d8fcffb2742e" containerName="extract-content" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.256573 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d0f6c12-cd85-4473-812f-d8fcffb2742e" containerName="extract-content" Nov 21 19:05:02 crc kubenswrapper[4701]: E1121 19:05:02.256595 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b7dd16f-9a0f-406f-84b4-cc94baf5405c" containerName="registry-server" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.256608 4701 
state_mem.go:107] "Deleted CPUSet assignment" podUID="4b7dd16f-9a0f-406f-84b4-cc94baf5405c" containerName="registry-server" Nov 21 19:05:02 crc kubenswrapper[4701]: E1121 19:05:02.256626 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="684dae85-0eb2-464b-8913-231a41728798" containerName="pruner" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.256639 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="684dae85-0eb2-464b-8913-231a41728798" containerName="pruner" Nov 21 19:05:02 crc kubenswrapper[4701]: E1121 19:05:02.256658 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="acc49089-393a-4ff8-9d87-f181dc8d45f1" containerName="pruner" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.256670 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="acc49089-393a-4ff8-9d87-f181dc8d45f1" containerName="pruner" Nov 21 19:05:02 crc kubenswrapper[4701]: E1121 19:05:02.256686 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53624f72-bfb0-4be5-b00a-f06de73ae1f0" containerName="extract-content" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.256700 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="53624f72-bfb0-4be5-b00a-f06de73ae1f0" containerName="extract-content" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.256950 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="02015e4f-1f27-4004-8bc6-778e6db5fb94" containerName="registry-server" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.256976 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b7dd16f-9a0f-406f-84b4-cc94baf5405c" containerName="registry-server" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.256995 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d0f6c12-cd85-4473-812f-d8fcffb2742e" containerName="registry-server" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.257014 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2f5b911-dc9c-4009-a4b0-da201a34f156" containerName="oauth-openshift" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.257033 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="684dae85-0eb2-464b-8913-231a41728798" containerName="pruner" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.257050 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="acc49089-393a-4ff8-9d87-f181dc8d45f1" containerName="pruner" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.257067 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="276e2cb3-e02e-4122-b10b-a454198b7954" containerName="collect-profiles" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.257082 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="53624f72-bfb0-4be5-b00a-f06de73ae1f0" containerName="registry-server" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.257727 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.266261 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.267414 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.268701 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.269293 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.269423 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.269501 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.269728 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.272698 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.273769 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.272735 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.279031 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.279502 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.283279 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/76acb27a-d41b-41fe-b378-5e8a38708e03-v4-0-config-system-session\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.283355 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/76acb27a-d41b-41fe-b378-5e8a38708e03-v4-0-config-system-cliconfig\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.283402 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nmv5l\" (UniqueName: \"kubernetes.io/projected/76acb27a-d41b-41fe-b378-5e8a38708e03-kube-api-access-nmv5l\") 
pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.283441 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/76acb27a-d41b-41fe-b378-5e8a38708e03-v4-0-config-user-template-error\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.283506 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/76acb27a-d41b-41fe-b378-5e8a38708e03-v4-0-config-system-service-ca\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.283560 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/76acb27a-d41b-41fe-b378-5e8a38708e03-v4-0-config-system-router-certs\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.283607 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/76acb27a-d41b-41fe-b378-5e8a38708e03-audit-dir\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.283645 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/76acb27a-d41b-41fe-b378-5e8a38708e03-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.283685 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/76acb27a-d41b-41fe-b378-5e8a38708e03-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.283756 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/76acb27a-d41b-41fe-b378-5e8a38708e03-audit-policies\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.283791 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" 
(UniqueName: \"kubernetes.io/secret/76acb27a-d41b-41fe-b378-5e8a38708e03-v4-0-config-system-serving-cert\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.283843 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/76acb27a-d41b-41fe-b378-5e8a38708e03-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.283898 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/76acb27a-d41b-41fe-b378-5e8a38708e03-v4-0-config-user-template-login\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.283952 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/76acb27a-d41b-41fe-b378-5e8a38708e03-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.286599 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-5494594499-4w6d2"] Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.287436 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.295773 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.305912 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.384706 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/76acb27a-d41b-41fe-b378-5e8a38708e03-v4-0-config-user-template-login\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.384770 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/76acb27a-d41b-41fe-b378-5e8a38708e03-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.384802 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: 
\"kubernetes.io/secret/76acb27a-d41b-41fe-b378-5e8a38708e03-v4-0-config-system-session\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.384821 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/76acb27a-d41b-41fe-b378-5e8a38708e03-v4-0-config-system-cliconfig\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.384839 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nmv5l\" (UniqueName: \"kubernetes.io/projected/76acb27a-d41b-41fe-b378-5e8a38708e03-kube-api-access-nmv5l\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.384855 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/76acb27a-d41b-41fe-b378-5e8a38708e03-v4-0-config-user-template-error\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.384871 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/76acb27a-d41b-41fe-b378-5e8a38708e03-v4-0-config-system-service-ca\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.384894 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/76acb27a-d41b-41fe-b378-5e8a38708e03-v4-0-config-system-router-certs\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.384919 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/76acb27a-d41b-41fe-b378-5e8a38708e03-audit-dir\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.384935 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/76acb27a-d41b-41fe-b378-5e8a38708e03-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.384952 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: 
\"kubernetes.io/secret/76acb27a-d41b-41fe-b378-5e8a38708e03-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.384983 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/76acb27a-d41b-41fe-b378-5e8a38708e03-audit-policies\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.385001 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/76acb27a-d41b-41fe-b378-5e8a38708e03-v4-0-config-system-serving-cert\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.385023 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/76acb27a-d41b-41fe-b378-5e8a38708e03-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.385957 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/76acb27a-d41b-41fe-b378-5e8a38708e03-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.386014 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/76acb27a-d41b-41fe-b378-5e8a38708e03-audit-dir\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.387366 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/76acb27a-d41b-41fe-b378-5e8a38708e03-audit-policies\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.388455 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/76acb27a-d41b-41fe-b378-5e8a38708e03-v4-0-config-system-service-ca\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.388622 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/76acb27a-d41b-41fe-b378-5e8a38708e03-v4-0-config-system-cliconfig\") pod 
\"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.402304 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/76acb27a-d41b-41fe-b378-5e8a38708e03-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.402355 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/76acb27a-d41b-41fe-b378-5e8a38708e03-v4-0-config-user-template-error\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.402420 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/76acb27a-d41b-41fe-b378-5e8a38708e03-v4-0-config-system-router-certs\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.402976 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/76acb27a-d41b-41fe-b378-5e8a38708e03-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.403185 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/76acb27a-d41b-41fe-b378-5e8a38708e03-v4-0-config-system-session\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.403822 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/76acb27a-d41b-41fe-b378-5e8a38708e03-v4-0-config-user-template-login\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.406867 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/76acb27a-d41b-41fe-b378-5e8a38708e03-v4-0-config-system-serving-cert\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.409030 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nmv5l\" (UniqueName: \"kubernetes.io/projected/76acb27a-d41b-41fe-b378-5e8a38708e03-kube-api-access-nmv5l\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: 
\"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.410029 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/76acb27a-d41b-41fe-b378-5e8a38708e03-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-5494594499-4w6d2\" (UID: \"76acb27a-d41b-41fe-b378-5e8a38708e03\") " pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.596239 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:02 crc kubenswrapper[4701]: I1121 19:05:02.838825 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-5494594499-4w6d2"] Nov 21 19:05:03 crc kubenswrapper[4701]: I1121 19:05:03.137118 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" event={"ID":"76acb27a-d41b-41fe-b378-5e8a38708e03","Type":"ContainerStarted","Data":"d054bebf6da97b38e9658da99f04fa56a343616dff78c247f545f31b9b26429b"} Nov 21 19:05:04 crc kubenswrapper[4701]: I1121 19:05:04.147609 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" event={"ID":"76acb27a-d41b-41fe-b378-5e8a38708e03","Type":"ContainerStarted","Data":"13226c5b0049e96997dc5f495fa57ad7a8fb4af1aefab0be88c93c04a1bb82bf"} Nov 21 19:05:04 crc kubenswrapper[4701]: I1121 19:05:04.150320 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:04 crc kubenswrapper[4701]: I1121 19:05:04.160509 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" Nov 21 19:05:04 crc kubenswrapper[4701]: I1121 19:05:04.185718 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-5494594499-4w6d2" podStartSLOduration=36.185689006 podStartE2EDuration="36.185689006s" podCreationTimestamp="2025-11-21 19:04:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:05:04.184161526 +0000 UTC m=+194.969301563" watchObservedRunningTime="2025-11-21 19:05:04.185689006 +0000 UTC m=+194.970829063" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.164163 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-nkl92"] Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.165576 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-nkl92" podUID="cb02901b-e5a6-4059-b49c-9011bfb481c9" containerName="registry-server" containerID="cri-o://aa72129c8d9034c18124beafd2bd77e723c7fe5feeee471107cdcaee2ebe58df" gracePeriod=30 Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.173418 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-r9glm"] Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.173744 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-r9glm" 
podUID="dde062d0-393b-4b35-80ec-c3f67c2a5129" containerName="registry-server" containerID="cri-o://93adbc1daefc1c0518dcc2ee8b1d3b1e79d9c3ecc68f05ebc94ffc93503ee87d" gracePeriod=30 Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.191019 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-v6w6b"] Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.191384 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-v6w6b" podUID="b3e75990-afff-41bb-a78e-3d04223bbb6c" containerName="marketplace-operator" containerID="cri-o://ff4839db168ea0caffb9a0155f7fe2524782b1a4b509d272a4b7c2bf650159c7" gracePeriod=30 Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.194701 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rcsbh"] Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.194978 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-rcsbh" podUID="795df5e4-162f-49a8-8316-2307c03a3f2d" containerName="registry-server" containerID="cri-o://a33d080b184cc4311b018b3d0623844263f28bf5e6d75d98001c3160b97e6e9c" gracePeriod=30 Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.200021 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-w6nbm"] Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.200249 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-7grnb"] Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.200395 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-w6nbm" podUID="90bb3a29-464a-4967-a009-d4fa6b92de73" containerName="registry-server" containerID="cri-o://b3b190cfa646af1c606c1e79ac5419cffce4bd9f594a73e7f58da0746a508a9d" gracePeriod=30 Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.201340 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-7grnb" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.212107 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-7grnb"] Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.291275 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/be676bd8-0b5e-48b4-829b-021f132d3247-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-7grnb\" (UID: \"be676bd8-0b5e-48b4-829b-021f132d3247\") " pod="openshift-marketplace/marketplace-operator-79b997595-7grnb" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.291352 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/be676bd8-0b5e-48b4-829b-021f132d3247-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-7grnb\" (UID: \"be676bd8-0b5e-48b4-829b-021f132d3247\") " pod="openshift-marketplace/marketplace-operator-79b997595-7grnb" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.291742 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bsf6f\" (UniqueName: \"kubernetes.io/projected/be676bd8-0b5e-48b4-829b-021f132d3247-kube-api-access-bsf6f\") pod \"marketplace-operator-79b997595-7grnb\" (UID: \"be676bd8-0b5e-48b4-829b-021f132d3247\") " pod="openshift-marketplace/marketplace-operator-79b997595-7grnb" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.393040 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bsf6f\" (UniqueName: \"kubernetes.io/projected/be676bd8-0b5e-48b4-829b-021f132d3247-kube-api-access-bsf6f\") pod \"marketplace-operator-79b997595-7grnb\" (UID: \"be676bd8-0b5e-48b4-829b-021f132d3247\") " pod="openshift-marketplace/marketplace-operator-79b997595-7grnb" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.393490 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/be676bd8-0b5e-48b4-829b-021f132d3247-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-7grnb\" (UID: \"be676bd8-0b5e-48b4-829b-021f132d3247\") " pod="openshift-marketplace/marketplace-operator-79b997595-7grnb" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.393517 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/be676bd8-0b5e-48b4-829b-021f132d3247-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-7grnb\" (UID: \"be676bd8-0b5e-48b4-829b-021f132d3247\") " pod="openshift-marketplace/marketplace-operator-79b997595-7grnb" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.394751 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/be676bd8-0b5e-48b4-829b-021f132d3247-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-7grnb\" (UID: \"be676bd8-0b5e-48b4-829b-021f132d3247\") " pod="openshift-marketplace/marketplace-operator-79b997595-7grnb" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.408774 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: 
\"kubernetes.io/secret/be676bd8-0b5e-48b4-829b-021f132d3247-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-7grnb\" (UID: \"be676bd8-0b5e-48b4-829b-021f132d3247\") " pod="openshift-marketplace/marketplace-operator-79b997595-7grnb" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.410891 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bsf6f\" (UniqueName: \"kubernetes.io/projected/be676bd8-0b5e-48b4-829b-021f132d3247-kube-api-access-bsf6f\") pod \"marketplace-operator-79b997595-7grnb\" (UID: \"be676bd8-0b5e-48b4-829b-021f132d3247\") " pod="openshift-marketplace/marketplace-operator-79b997595-7grnb" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.539269 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-7grnb" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.613492 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.614743 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.614962 4701 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.616330 4701 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d2e81ee034439f66ef1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322"} pod="openshift-machine-config-operator/machine-config-daemon-tbszf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.616565 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" containerID="cri-o://d2e81ee034439f66ef1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322" gracePeriod=600 Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.636612 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-nkl92" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.674345 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-r9glm" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.677964 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rcsbh" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.683697 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-w6nbm" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.687063 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-v6w6b" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.696408 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-76lkf\" (UniqueName: \"kubernetes.io/projected/cb02901b-e5a6-4059-b49c-9011bfb481c9-kube-api-access-76lkf\") pod \"cb02901b-e5a6-4059-b49c-9011bfb481c9\" (UID: \"cb02901b-e5a6-4059-b49c-9011bfb481c9\") " Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.696503 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb02901b-e5a6-4059-b49c-9011bfb481c9-utilities\") pod \"cb02901b-e5a6-4059-b49c-9011bfb481c9\" (UID: \"cb02901b-e5a6-4059-b49c-9011bfb481c9\") " Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.696590 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb02901b-e5a6-4059-b49c-9011bfb481c9-catalog-content\") pod \"cb02901b-e5a6-4059-b49c-9011bfb481c9\" (UID: \"cb02901b-e5a6-4059-b49c-9011bfb481c9\") " Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.697803 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cb02901b-e5a6-4059-b49c-9011bfb481c9-utilities" (OuterVolumeSpecName: "utilities") pod "cb02901b-e5a6-4059-b49c-9011bfb481c9" (UID: "cb02901b-e5a6-4059-b49c-9011bfb481c9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.701569 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb02901b-e5a6-4059-b49c-9011bfb481c9-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.708680 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb02901b-e5a6-4059-b49c-9011bfb481c9-kube-api-access-76lkf" (OuterVolumeSpecName: "kube-api-access-76lkf") pod "cb02901b-e5a6-4059-b49c-9011bfb481c9" (UID: "cb02901b-e5a6-4059-b49c-9011bfb481c9"). InnerVolumeSpecName "kube-api-access-76lkf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.771152 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cb02901b-e5a6-4059-b49c-9011bfb481c9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cb02901b-e5a6-4059-b49c-9011bfb481c9" (UID: "cb02901b-e5a6-4059-b49c-9011bfb481c9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.803006 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b3e75990-afff-41bb-a78e-3d04223bbb6c-marketplace-operator-metrics\") pod \"b3e75990-afff-41bb-a78e-3d04223bbb6c\" (UID: \"b3e75990-afff-41bb-a78e-3d04223bbb6c\") " Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.803084 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/795df5e4-162f-49a8-8316-2307c03a3f2d-catalog-content\") pod \"795df5e4-162f-49a8-8316-2307c03a3f2d\" (UID: \"795df5e4-162f-49a8-8316-2307c03a3f2d\") " Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.803122 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-slgtp\" (UniqueName: \"kubernetes.io/projected/795df5e4-162f-49a8-8316-2307c03a3f2d-kube-api-access-slgtp\") pod \"795df5e4-162f-49a8-8316-2307c03a3f2d\" (UID: \"795df5e4-162f-49a8-8316-2307c03a3f2d\") " Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.803154 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/795df5e4-162f-49a8-8316-2307c03a3f2d-utilities\") pod \"795df5e4-162f-49a8-8316-2307c03a3f2d\" (UID: \"795df5e4-162f-49a8-8316-2307c03a3f2d\") " Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.803172 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dde062d0-393b-4b35-80ec-c3f67c2a5129-utilities\") pod \"dde062d0-393b-4b35-80ec-c3f67c2a5129\" (UID: \"dde062d0-393b-4b35-80ec-c3f67c2a5129\") " Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.803242 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9dlbd\" (UniqueName: \"kubernetes.io/projected/dde062d0-393b-4b35-80ec-c3f67c2a5129-kube-api-access-9dlbd\") pod \"dde062d0-393b-4b35-80ec-c3f67c2a5129\" (UID: \"dde062d0-393b-4b35-80ec-c3f67c2a5129\") " Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.803267 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fh648\" (UniqueName: \"kubernetes.io/projected/90bb3a29-464a-4967-a009-d4fa6b92de73-kube-api-access-fh648\") pod \"90bb3a29-464a-4967-a009-d4fa6b92de73\" (UID: \"90bb3a29-464a-4967-a009-d4fa6b92de73\") " Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.803306 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mtm8m\" (UniqueName: \"kubernetes.io/projected/b3e75990-afff-41bb-a78e-3d04223bbb6c-kube-api-access-mtm8m\") pod \"b3e75990-afff-41bb-a78e-3d04223bbb6c\" (UID: \"b3e75990-afff-41bb-a78e-3d04223bbb6c\") " Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.803332 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dde062d0-393b-4b35-80ec-c3f67c2a5129-catalog-content\") pod \"dde062d0-393b-4b35-80ec-c3f67c2a5129\" (UID: \"dde062d0-393b-4b35-80ec-c3f67c2a5129\") " Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.803355 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/90bb3a29-464a-4967-a009-d4fa6b92de73-catalog-content\") pod \"90bb3a29-464a-4967-a009-d4fa6b92de73\" (UID: \"90bb3a29-464a-4967-a009-d4fa6b92de73\") " Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.803379 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b3e75990-afff-41bb-a78e-3d04223bbb6c-marketplace-trusted-ca\") pod \"b3e75990-afff-41bb-a78e-3d04223bbb6c\" (UID: \"b3e75990-afff-41bb-a78e-3d04223bbb6c\") " Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.803398 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90bb3a29-464a-4967-a009-d4fa6b92de73-utilities\") pod \"90bb3a29-464a-4967-a009-d4fa6b92de73\" (UID: \"90bb3a29-464a-4967-a009-d4fa6b92de73\") " Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.803595 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb02901b-e5a6-4059-b49c-9011bfb481c9-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.803609 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-76lkf\" (UniqueName: \"kubernetes.io/projected/cb02901b-e5a6-4059-b49c-9011bfb481c9-kube-api-access-76lkf\") on node \"crc\" DevicePath \"\"" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.804292 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/90bb3a29-464a-4967-a009-d4fa6b92de73-utilities" (OuterVolumeSpecName: "utilities") pod "90bb3a29-464a-4967-a009-d4fa6b92de73" (UID: "90bb3a29-464a-4967-a009-d4fa6b92de73"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.807265 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/795df5e4-162f-49a8-8316-2307c03a3f2d-utilities" (OuterVolumeSpecName: "utilities") pod "795df5e4-162f-49a8-8316-2307c03a3f2d" (UID: "795df5e4-162f-49a8-8316-2307c03a3f2d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.809403 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90bb3a29-464a-4967-a009-d4fa6b92de73-kube-api-access-fh648" (OuterVolumeSpecName: "kube-api-access-fh648") pod "90bb3a29-464a-4967-a009-d4fa6b92de73" (UID: "90bb3a29-464a-4967-a009-d4fa6b92de73"). InnerVolumeSpecName "kube-api-access-fh648". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.809445 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dde062d0-393b-4b35-80ec-c3f67c2a5129-utilities" (OuterVolumeSpecName: "utilities") pod "dde062d0-393b-4b35-80ec-c3f67c2a5129" (UID: "dde062d0-393b-4b35-80ec-c3f67c2a5129"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.809637 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b3e75990-afff-41bb-a78e-3d04223bbb6c-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b3e75990-afff-41bb-a78e-3d04223bbb6c" (UID: "b3e75990-afff-41bb-a78e-3d04223bbb6c"). 
InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.809774 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3e75990-afff-41bb-a78e-3d04223bbb6c-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b3e75990-afff-41bb-a78e-3d04223bbb6c" (UID: "b3e75990-afff-41bb-a78e-3d04223bbb6c"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.810983 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3e75990-afff-41bb-a78e-3d04223bbb6c-kube-api-access-mtm8m" (OuterVolumeSpecName: "kube-api-access-mtm8m") pod "b3e75990-afff-41bb-a78e-3d04223bbb6c" (UID: "b3e75990-afff-41bb-a78e-3d04223bbb6c"). InnerVolumeSpecName "kube-api-access-mtm8m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.812753 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dde062d0-393b-4b35-80ec-c3f67c2a5129-kube-api-access-9dlbd" (OuterVolumeSpecName: "kube-api-access-9dlbd") pod "dde062d0-393b-4b35-80ec-c3f67c2a5129" (UID: "dde062d0-393b-4b35-80ec-c3f67c2a5129"). InnerVolumeSpecName "kube-api-access-9dlbd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.812696 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/795df5e4-162f-49a8-8316-2307c03a3f2d-kube-api-access-slgtp" (OuterVolumeSpecName: "kube-api-access-slgtp") pod "795df5e4-162f-49a8-8316-2307c03a3f2d" (UID: "795df5e4-162f-49a8-8316-2307c03a3f2d"). InnerVolumeSpecName "kube-api-access-slgtp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.847212 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/795df5e4-162f-49a8-8316-2307c03a3f2d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "795df5e4-162f-49a8-8316-2307c03a3f2d" (UID: "795df5e4-162f-49a8-8316-2307c03a3f2d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.870378 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dde062d0-393b-4b35-80ec-c3f67c2a5129-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dde062d0-393b-4b35-80ec-c3f67c2a5129" (UID: "dde062d0-393b-4b35-80ec-c3f67c2a5129"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.904853 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/795df5e4-162f-49a8-8316-2307c03a3f2d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.904889 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-slgtp\" (UniqueName: \"kubernetes.io/projected/795df5e4-162f-49a8-8316-2307c03a3f2d-kube-api-access-slgtp\") on node \"crc\" DevicePath \"\"" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.904901 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/795df5e4-162f-49a8-8316-2307c03a3f2d-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.904909 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dde062d0-393b-4b35-80ec-c3f67c2a5129-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.904917 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9dlbd\" (UniqueName: \"kubernetes.io/projected/dde062d0-393b-4b35-80ec-c3f67c2a5129-kube-api-access-9dlbd\") on node \"crc\" DevicePath \"\"" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.904928 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fh648\" (UniqueName: \"kubernetes.io/projected/90bb3a29-464a-4967-a009-d4fa6b92de73-kube-api-access-fh648\") on node \"crc\" DevicePath \"\"" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.904940 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mtm8m\" (UniqueName: \"kubernetes.io/projected/b3e75990-afff-41bb-a78e-3d04223bbb6c-kube-api-access-mtm8m\") on node \"crc\" DevicePath \"\"" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.904949 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dde062d0-393b-4b35-80ec-c3f67c2a5129-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.904957 4701 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b3e75990-afff-41bb-a78e-3d04223bbb6c-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.904965 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90bb3a29-464a-4967-a009-d4fa6b92de73-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.904972 4701 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b3e75990-afff-41bb-a78e-3d04223bbb6c-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 21 19:05:18 crc kubenswrapper[4701]: I1121 19:05:18.914545 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/90bb3a29-464a-4967-a009-d4fa6b92de73-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "90bb3a29-464a-4967-a009-d4fa6b92de73" (UID: "90bb3a29-464a-4967-a009-d4fa6b92de73"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.007697 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90bb3a29-464a-4967-a009-d4fa6b92de73-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.026240 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-7grnb"] Nov 21 19:05:19 crc kubenswrapper[4701]: W1121 19:05:19.029764 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbe676bd8_0b5e_48b4_829b_021f132d3247.slice/crio-def720676e115bc8e9c4323fd7e35207d533dc6d1bc082e6d938e31b23d4264b WatchSource:0}: Error finding container def720676e115bc8e9c4323fd7e35207d533dc6d1bc082e6d938e31b23d4264b: Status 404 returned error can't find the container with id def720676e115bc8e9c4323fd7e35207d533dc6d1bc082e6d938e31b23d4264b Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.269905 4701 generic.go:334] "Generic (PLEG): container finished" podID="b3e75990-afff-41bb-a78e-3d04223bbb6c" containerID="ff4839db168ea0caffb9a0155f7fe2524782b1a4b509d272a4b7c2bf650159c7" exitCode=0 Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.270382 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-v6w6b" event={"ID":"b3e75990-afff-41bb-a78e-3d04223bbb6c","Type":"ContainerDied","Data":"ff4839db168ea0caffb9a0155f7fe2524782b1a4b509d272a4b7c2bf650159c7"} Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.270520 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-v6w6b" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.270619 4701 scope.go:117] "RemoveContainer" containerID="ff4839db168ea0caffb9a0155f7fe2524782b1a4b509d272a4b7c2bf650159c7" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.270591 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-v6w6b" event={"ID":"b3e75990-afff-41bb-a78e-3d04223bbb6c","Type":"ContainerDied","Data":"7b635e6c22b41b7ca93a8f9cb7c7782c18c5b31dc31619e1fd1a0c9a6398af02"} Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.294553 4701 generic.go:334] "Generic (PLEG): container finished" podID="90bb3a29-464a-4967-a009-d4fa6b92de73" containerID="b3b190cfa646af1c606c1e79ac5419cffce4bd9f594a73e7f58da0746a508a9d" exitCode=0 Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.294937 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-w6nbm" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.295176 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w6nbm" event={"ID":"90bb3a29-464a-4967-a009-d4fa6b92de73","Type":"ContainerDied","Data":"b3b190cfa646af1c606c1e79ac5419cffce4bd9f594a73e7f58da0746a508a9d"} Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.295563 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w6nbm" event={"ID":"90bb3a29-464a-4967-a009-d4fa6b92de73","Type":"ContainerDied","Data":"403d68382d830c6809c21c6c8a81e267a183c192411b971ae0df4e8124bd512c"} Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.296497 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-7grnb" event={"ID":"be676bd8-0b5e-48b4-829b-021f132d3247","Type":"ContainerStarted","Data":"afdefc7dd3a311411bfd259d87b26871caabead547407456ab4ef984c5d9a5e9"} Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.296549 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-7grnb" event={"ID":"be676bd8-0b5e-48b4-829b-021f132d3247","Type":"ContainerStarted","Data":"def720676e115bc8e9c4323fd7e35207d533dc6d1bc082e6d938e31b23d4264b"} Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.296952 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-7grnb" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.298857 4701 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-7grnb container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.55:8080/healthz\": dial tcp 10.217.0.55:8080: connect: connection refused" start-of-body= Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.298899 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-7grnb" podUID="be676bd8-0b5e-48b4-829b-021f132d3247" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.55:8080/healthz\": dial tcp 10.217.0.55:8080: connect: connection refused" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.299079 4701 generic.go:334] "Generic (PLEG): container finished" podID="cb02901b-e5a6-4059-b49c-9011bfb481c9" containerID="aa72129c8d9034c18124beafd2bd77e723c7fe5feeee471107cdcaee2ebe58df" exitCode=0 Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.299137 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nkl92" event={"ID":"cb02901b-e5a6-4059-b49c-9011bfb481c9","Type":"ContainerDied","Data":"aa72129c8d9034c18124beafd2bd77e723c7fe5feeee471107cdcaee2ebe58df"} Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.299157 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nkl92" event={"ID":"cb02901b-e5a6-4059-b49c-9011bfb481c9","Type":"ContainerDied","Data":"45ad0c783eb40d6e26a8e6fd156d57d7e9bce257208d4435c67ca9e774188ee2"} Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.299243 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-nkl92" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.302728 4701 generic.go:334] "Generic (PLEG): container finished" podID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerID="d2e81ee034439f66ef1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322" exitCode=0 Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.302776 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerDied","Data":"d2e81ee034439f66ef1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322"} Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.302796 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerStarted","Data":"4588b2a736d568e8f69ecdacc0ee6977f154eb82e175accdf7d81cf19a181fd6"} Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.308738 4701 scope.go:117] "RemoveContainer" containerID="ff4839db168ea0caffb9a0155f7fe2524782b1a4b509d272a4b7c2bf650159c7" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.309360 4701 generic.go:334] "Generic (PLEG): container finished" podID="795df5e4-162f-49a8-8316-2307c03a3f2d" containerID="a33d080b184cc4311b018b3d0623844263f28bf5e6d75d98001c3160b97e6e9c" exitCode=0 Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.309412 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rcsbh" event={"ID":"795df5e4-162f-49a8-8316-2307c03a3f2d","Type":"ContainerDied","Data":"a33d080b184cc4311b018b3d0623844263f28bf5e6d75d98001c3160b97e6e9c"} Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.309433 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rcsbh" event={"ID":"795df5e4-162f-49a8-8316-2307c03a3f2d","Type":"ContainerDied","Data":"a3cacd87e6814702b8dac247ccc17ddd1dfa12c5f4ac23f384e91c3dfcc886c2"} Nov 21 19:05:19 crc kubenswrapper[4701]: E1121 19:05:19.309474 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff4839db168ea0caffb9a0155f7fe2524782b1a4b509d272a4b7c2bf650159c7\": container with ID starting with ff4839db168ea0caffb9a0155f7fe2524782b1a4b509d272a4b7c2bf650159c7 not found: ID does not exist" containerID="ff4839db168ea0caffb9a0155f7fe2524782b1a4b509d272a4b7c2bf650159c7" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.309499 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff4839db168ea0caffb9a0155f7fe2524782b1a4b509d272a4b7c2bf650159c7"} err="failed to get container status \"ff4839db168ea0caffb9a0155f7fe2524782b1a4b509d272a4b7c2bf650159c7\": rpc error: code = NotFound desc = could not find container \"ff4839db168ea0caffb9a0155f7fe2524782b1a4b509d272a4b7c2bf650159c7\": container with ID starting with ff4839db168ea0caffb9a0155f7fe2524782b1a4b509d272a4b7c2bf650159c7 not found: ID does not exist" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.309516 4701 scope.go:117] "RemoveContainer" containerID="b3b190cfa646af1c606c1e79ac5419cffce4bd9f594a73e7f58da0746a508a9d" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.309601 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rcsbh" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.320809 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-v6w6b"] Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.324400 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-v6w6b"] Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.326553 4701 generic.go:334] "Generic (PLEG): container finished" podID="dde062d0-393b-4b35-80ec-c3f67c2a5129" containerID="93adbc1daefc1c0518dcc2ee8b1d3b1e79d9c3ecc68f05ebc94ffc93503ee87d" exitCode=0 Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.326637 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r9glm" event={"ID":"dde062d0-393b-4b35-80ec-c3f67c2a5129","Type":"ContainerDied","Data":"93adbc1daefc1c0518dcc2ee8b1d3b1e79d9c3ecc68f05ebc94ffc93503ee87d"} Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.326676 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r9glm" event={"ID":"dde062d0-393b-4b35-80ec-c3f67c2a5129","Type":"ContainerDied","Data":"f9f409d94f6458181b5001b9cd9a5b1f664ebf69bfbe98b35903f7c81632f72c"} Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.326774 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-r9glm" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.351903 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-7grnb" podStartSLOduration=1.3518807769999999 podStartE2EDuration="1.351880777s" podCreationTimestamp="2025-11-21 19:05:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:05:19.32970267 +0000 UTC m=+210.114842697" watchObservedRunningTime="2025-11-21 19:05:19.351880777 +0000 UTC m=+210.137020814" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.373269 4701 scope.go:117] "RemoveContainer" containerID="6bda89ab6f5c63e512fe189281514f22034c217c366f9030935a2eca0035e377" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.392368 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-nkl92"] Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.396855 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-nkl92"] Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.403649 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rcsbh"] Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.408076 4701 scope.go:117] "RemoveContainer" containerID="3590a0b1faa20a988f3a5d2e50d370fad2b71b4035babb5c8fe12f0353d6b85e" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.415468 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-rcsbh"] Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.419096 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-w6nbm"] Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.424515 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-w6nbm"] Nov 21 19:05:19 crc 
kubenswrapper[4701]: I1121 19:05:19.435687 4701 scope.go:117] "RemoveContainer" containerID="b3b190cfa646af1c606c1e79ac5419cffce4bd9f594a73e7f58da0746a508a9d" Nov 21 19:05:19 crc kubenswrapper[4701]: E1121 19:05:19.436334 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b3b190cfa646af1c606c1e79ac5419cffce4bd9f594a73e7f58da0746a508a9d\": container with ID starting with b3b190cfa646af1c606c1e79ac5419cffce4bd9f594a73e7f58da0746a508a9d not found: ID does not exist" containerID="b3b190cfa646af1c606c1e79ac5419cffce4bd9f594a73e7f58da0746a508a9d" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.436374 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3b190cfa646af1c606c1e79ac5419cffce4bd9f594a73e7f58da0746a508a9d"} err="failed to get container status \"b3b190cfa646af1c606c1e79ac5419cffce4bd9f594a73e7f58da0746a508a9d\": rpc error: code = NotFound desc = could not find container \"b3b190cfa646af1c606c1e79ac5419cffce4bd9f594a73e7f58da0746a508a9d\": container with ID starting with b3b190cfa646af1c606c1e79ac5419cffce4bd9f594a73e7f58da0746a508a9d not found: ID does not exist" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.436417 4701 scope.go:117] "RemoveContainer" containerID="6bda89ab6f5c63e512fe189281514f22034c217c366f9030935a2eca0035e377" Nov 21 19:05:19 crc kubenswrapper[4701]: E1121 19:05:19.436821 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6bda89ab6f5c63e512fe189281514f22034c217c366f9030935a2eca0035e377\": container with ID starting with 6bda89ab6f5c63e512fe189281514f22034c217c366f9030935a2eca0035e377 not found: ID does not exist" containerID="6bda89ab6f5c63e512fe189281514f22034c217c366f9030935a2eca0035e377" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.436853 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6bda89ab6f5c63e512fe189281514f22034c217c366f9030935a2eca0035e377"} err="failed to get container status \"6bda89ab6f5c63e512fe189281514f22034c217c366f9030935a2eca0035e377\": rpc error: code = NotFound desc = could not find container \"6bda89ab6f5c63e512fe189281514f22034c217c366f9030935a2eca0035e377\": container with ID starting with 6bda89ab6f5c63e512fe189281514f22034c217c366f9030935a2eca0035e377 not found: ID does not exist" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.436872 4701 scope.go:117] "RemoveContainer" containerID="3590a0b1faa20a988f3a5d2e50d370fad2b71b4035babb5c8fe12f0353d6b85e" Nov 21 19:05:19 crc kubenswrapper[4701]: E1121 19:05:19.437326 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3590a0b1faa20a988f3a5d2e50d370fad2b71b4035babb5c8fe12f0353d6b85e\": container with ID starting with 3590a0b1faa20a988f3a5d2e50d370fad2b71b4035babb5c8fe12f0353d6b85e not found: ID does not exist" containerID="3590a0b1faa20a988f3a5d2e50d370fad2b71b4035babb5c8fe12f0353d6b85e" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.437456 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3590a0b1faa20a988f3a5d2e50d370fad2b71b4035babb5c8fe12f0353d6b85e"} err="failed to get container status \"3590a0b1faa20a988f3a5d2e50d370fad2b71b4035babb5c8fe12f0353d6b85e\": rpc error: code = NotFound desc = could not find container 
\"3590a0b1faa20a988f3a5d2e50d370fad2b71b4035babb5c8fe12f0353d6b85e\": container with ID starting with 3590a0b1faa20a988f3a5d2e50d370fad2b71b4035babb5c8fe12f0353d6b85e not found: ID does not exist" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.437582 4701 scope.go:117] "RemoveContainer" containerID="aa72129c8d9034c18124beafd2bd77e723c7fe5feeee471107cdcaee2ebe58df" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.437925 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-r9glm"] Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.440340 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-r9glm"] Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.450695 4701 scope.go:117] "RemoveContainer" containerID="99a642217bc52a030908b2ffcb8e2e44a53a3e41ab95443ef286e87e87d85cbf" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.468060 4701 scope.go:117] "RemoveContainer" containerID="5dd0b85bdc63f3422ee1dd7700fbb96388fe91dc231674765ed87a7d87b2aaae" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.499572 4701 scope.go:117] "RemoveContainer" containerID="aa72129c8d9034c18124beafd2bd77e723c7fe5feeee471107cdcaee2ebe58df" Nov 21 19:05:19 crc kubenswrapper[4701]: E1121 19:05:19.502100 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aa72129c8d9034c18124beafd2bd77e723c7fe5feeee471107cdcaee2ebe58df\": container with ID starting with aa72129c8d9034c18124beafd2bd77e723c7fe5feeee471107cdcaee2ebe58df not found: ID does not exist" containerID="aa72129c8d9034c18124beafd2bd77e723c7fe5feeee471107cdcaee2ebe58df" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.502159 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aa72129c8d9034c18124beafd2bd77e723c7fe5feeee471107cdcaee2ebe58df"} err="failed to get container status \"aa72129c8d9034c18124beafd2bd77e723c7fe5feeee471107cdcaee2ebe58df\": rpc error: code = NotFound desc = could not find container \"aa72129c8d9034c18124beafd2bd77e723c7fe5feeee471107cdcaee2ebe58df\": container with ID starting with aa72129c8d9034c18124beafd2bd77e723c7fe5feeee471107cdcaee2ebe58df not found: ID does not exist" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.502333 4701 scope.go:117] "RemoveContainer" containerID="99a642217bc52a030908b2ffcb8e2e44a53a3e41ab95443ef286e87e87d85cbf" Nov 21 19:05:19 crc kubenswrapper[4701]: E1121 19:05:19.503083 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"99a642217bc52a030908b2ffcb8e2e44a53a3e41ab95443ef286e87e87d85cbf\": container with ID starting with 99a642217bc52a030908b2ffcb8e2e44a53a3e41ab95443ef286e87e87d85cbf not found: ID does not exist" containerID="99a642217bc52a030908b2ffcb8e2e44a53a3e41ab95443ef286e87e87d85cbf" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.503157 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99a642217bc52a030908b2ffcb8e2e44a53a3e41ab95443ef286e87e87d85cbf"} err="failed to get container status \"99a642217bc52a030908b2ffcb8e2e44a53a3e41ab95443ef286e87e87d85cbf\": rpc error: code = NotFound desc = could not find container \"99a642217bc52a030908b2ffcb8e2e44a53a3e41ab95443ef286e87e87d85cbf\": container with ID starting with 99a642217bc52a030908b2ffcb8e2e44a53a3e41ab95443ef286e87e87d85cbf not found: ID does not exist" 
Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.503221 4701 scope.go:117] "RemoveContainer" containerID="5dd0b85bdc63f3422ee1dd7700fbb96388fe91dc231674765ed87a7d87b2aaae" Nov 21 19:05:19 crc kubenswrapper[4701]: E1121 19:05:19.503794 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5dd0b85bdc63f3422ee1dd7700fbb96388fe91dc231674765ed87a7d87b2aaae\": container with ID starting with 5dd0b85bdc63f3422ee1dd7700fbb96388fe91dc231674765ed87a7d87b2aaae not found: ID does not exist" containerID="5dd0b85bdc63f3422ee1dd7700fbb96388fe91dc231674765ed87a7d87b2aaae" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.503860 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5dd0b85bdc63f3422ee1dd7700fbb96388fe91dc231674765ed87a7d87b2aaae"} err="failed to get container status \"5dd0b85bdc63f3422ee1dd7700fbb96388fe91dc231674765ed87a7d87b2aaae\": rpc error: code = NotFound desc = could not find container \"5dd0b85bdc63f3422ee1dd7700fbb96388fe91dc231674765ed87a7d87b2aaae\": container with ID starting with 5dd0b85bdc63f3422ee1dd7700fbb96388fe91dc231674765ed87a7d87b2aaae not found: ID does not exist" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.503895 4701 scope.go:117] "RemoveContainer" containerID="a33d080b184cc4311b018b3d0623844263f28bf5e6d75d98001c3160b97e6e9c" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.523445 4701 scope.go:117] "RemoveContainer" containerID="2419aa2171a13f2dc1827d518625a3ee1ba1cf19d1cb42a7d5cd115851b69eeb" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.539559 4701 scope.go:117] "RemoveContainer" containerID="0c65112be33de7d23a36799d00b0101f6ad3c5abd1be84347da3b07fcf81d4e8" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.555867 4701 scope.go:117] "RemoveContainer" containerID="a33d080b184cc4311b018b3d0623844263f28bf5e6d75d98001c3160b97e6e9c" Nov 21 19:05:19 crc kubenswrapper[4701]: E1121 19:05:19.556394 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a33d080b184cc4311b018b3d0623844263f28bf5e6d75d98001c3160b97e6e9c\": container with ID starting with a33d080b184cc4311b018b3d0623844263f28bf5e6d75d98001c3160b97e6e9c not found: ID does not exist" containerID="a33d080b184cc4311b018b3d0623844263f28bf5e6d75d98001c3160b97e6e9c" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.556444 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a33d080b184cc4311b018b3d0623844263f28bf5e6d75d98001c3160b97e6e9c"} err="failed to get container status \"a33d080b184cc4311b018b3d0623844263f28bf5e6d75d98001c3160b97e6e9c\": rpc error: code = NotFound desc = could not find container \"a33d080b184cc4311b018b3d0623844263f28bf5e6d75d98001c3160b97e6e9c\": container with ID starting with a33d080b184cc4311b018b3d0623844263f28bf5e6d75d98001c3160b97e6e9c not found: ID does not exist" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.556482 4701 scope.go:117] "RemoveContainer" containerID="2419aa2171a13f2dc1827d518625a3ee1ba1cf19d1cb42a7d5cd115851b69eeb" Nov 21 19:05:19 crc kubenswrapper[4701]: E1121 19:05:19.556882 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2419aa2171a13f2dc1827d518625a3ee1ba1cf19d1cb42a7d5cd115851b69eeb\": container with ID starting with 2419aa2171a13f2dc1827d518625a3ee1ba1cf19d1cb42a7d5cd115851b69eeb not 
found: ID does not exist" containerID="2419aa2171a13f2dc1827d518625a3ee1ba1cf19d1cb42a7d5cd115851b69eeb" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.556917 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2419aa2171a13f2dc1827d518625a3ee1ba1cf19d1cb42a7d5cd115851b69eeb"} err="failed to get container status \"2419aa2171a13f2dc1827d518625a3ee1ba1cf19d1cb42a7d5cd115851b69eeb\": rpc error: code = NotFound desc = could not find container \"2419aa2171a13f2dc1827d518625a3ee1ba1cf19d1cb42a7d5cd115851b69eeb\": container with ID starting with 2419aa2171a13f2dc1827d518625a3ee1ba1cf19d1cb42a7d5cd115851b69eeb not found: ID does not exist" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.556961 4701 scope.go:117] "RemoveContainer" containerID="0c65112be33de7d23a36799d00b0101f6ad3c5abd1be84347da3b07fcf81d4e8" Nov 21 19:05:19 crc kubenswrapper[4701]: E1121 19:05:19.557595 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0c65112be33de7d23a36799d00b0101f6ad3c5abd1be84347da3b07fcf81d4e8\": container with ID starting with 0c65112be33de7d23a36799d00b0101f6ad3c5abd1be84347da3b07fcf81d4e8 not found: ID does not exist" containerID="0c65112be33de7d23a36799d00b0101f6ad3c5abd1be84347da3b07fcf81d4e8" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.557662 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c65112be33de7d23a36799d00b0101f6ad3c5abd1be84347da3b07fcf81d4e8"} err="failed to get container status \"0c65112be33de7d23a36799d00b0101f6ad3c5abd1be84347da3b07fcf81d4e8\": rpc error: code = NotFound desc = could not find container \"0c65112be33de7d23a36799d00b0101f6ad3c5abd1be84347da3b07fcf81d4e8\": container with ID starting with 0c65112be33de7d23a36799d00b0101f6ad3c5abd1be84347da3b07fcf81d4e8 not found: ID does not exist" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.557704 4701 scope.go:117] "RemoveContainer" containerID="93adbc1daefc1c0518dcc2ee8b1d3b1e79d9c3ecc68f05ebc94ffc93503ee87d" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.572042 4701 scope.go:117] "RemoveContainer" containerID="3c2dd72634ab5688815daf2bcfacb79ebd17a0ae4a74f64aaa447f36163b6243" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.593654 4701 scope.go:117] "RemoveContainer" containerID="a008b5c6dae2c78356f388638ab94280f58c81eb9ac32a90db7c9432dfebba2d" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.610655 4701 scope.go:117] "RemoveContainer" containerID="93adbc1daefc1c0518dcc2ee8b1d3b1e79d9c3ecc68f05ebc94ffc93503ee87d" Nov 21 19:05:19 crc kubenswrapper[4701]: E1121 19:05:19.611216 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"93adbc1daefc1c0518dcc2ee8b1d3b1e79d9c3ecc68f05ebc94ffc93503ee87d\": container with ID starting with 93adbc1daefc1c0518dcc2ee8b1d3b1e79d9c3ecc68f05ebc94ffc93503ee87d not found: ID does not exist" containerID="93adbc1daefc1c0518dcc2ee8b1d3b1e79d9c3ecc68f05ebc94ffc93503ee87d" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.611271 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"93adbc1daefc1c0518dcc2ee8b1d3b1e79d9c3ecc68f05ebc94ffc93503ee87d"} err="failed to get container status \"93adbc1daefc1c0518dcc2ee8b1d3b1e79d9c3ecc68f05ebc94ffc93503ee87d\": rpc error: code = NotFound desc = could not find container 
\"93adbc1daefc1c0518dcc2ee8b1d3b1e79d9c3ecc68f05ebc94ffc93503ee87d\": container with ID starting with 93adbc1daefc1c0518dcc2ee8b1d3b1e79d9c3ecc68f05ebc94ffc93503ee87d not found: ID does not exist" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.611308 4701 scope.go:117] "RemoveContainer" containerID="3c2dd72634ab5688815daf2bcfacb79ebd17a0ae4a74f64aaa447f36163b6243" Nov 21 19:05:19 crc kubenswrapper[4701]: E1121 19:05:19.611679 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3c2dd72634ab5688815daf2bcfacb79ebd17a0ae4a74f64aaa447f36163b6243\": container with ID starting with 3c2dd72634ab5688815daf2bcfacb79ebd17a0ae4a74f64aaa447f36163b6243 not found: ID does not exist" containerID="3c2dd72634ab5688815daf2bcfacb79ebd17a0ae4a74f64aaa447f36163b6243" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.611725 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c2dd72634ab5688815daf2bcfacb79ebd17a0ae4a74f64aaa447f36163b6243"} err="failed to get container status \"3c2dd72634ab5688815daf2bcfacb79ebd17a0ae4a74f64aaa447f36163b6243\": rpc error: code = NotFound desc = could not find container \"3c2dd72634ab5688815daf2bcfacb79ebd17a0ae4a74f64aaa447f36163b6243\": container with ID starting with 3c2dd72634ab5688815daf2bcfacb79ebd17a0ae4a74f64aaa447f36163b6243 not found: ID does not exist" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.611760 4701 scope.go:117] "RemoveContainer" containerID="a008b5c6dae2c78356f388638ab94280f58c81eb9ac32a90db7c9432dfebba2d" Nov 21 19:05:19 crc kubenswrapper[4701]: E1121 19:05:19.612386 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a008b5c6dae2c78356f388638ab94280f58c81eb9ac32a90db7c9432dfebba2d\": container with ID starting with a008b5c6dae2c78356f388638ab94280f58c81eb9ac32a90db7c9432dfebba2d not found: ID does not exist" containerID="a008b5c6dae2c78356f388638ab94280f58c81eb9ac32a90db7c9432dfebba2d" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.612416 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a008b5c6dae2c78356f388638ab94280f58c81eb9ac32a90db7c9432dfebba2d"} err="failed to get container status \"a008b5c6dae2c78356f388638ab94280f58c81eb9ac32a90db7c9432dfebba2d\": rpc error: code = NotFound desc = could not find container \"a008b5c6dae2c78356f388638ab94280f58c81eb9ac32a90db7c9432dfebba2d\": container with ID starting with a008b5c6dae2c78356f388638ab94280f58c81eb9ac32a90db7c9432dfebba2d not found: ID does not exist" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.959086 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="795df5e4-162f-49a8-8316-2307c03a3f2d" path="/var/lib/kubelet/pods/795df5e4-162f-49a8-8316-2307c03a3f2d/volumes" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.960108 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="90bb3a29-464a-4967-a009-d4fa6b92de73" path="/var/lib/kubelet/pods/90bb3a29-464a-4967-a009-d4fa6b92de73/volumes" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.960872 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b3e75990-afff-41bb-a78e-3d04223bbb6c" path="/var/lib/kubelet/pods/b3e75990-afff-41bb-a78e-3d04223bbb6c/volumes" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.962094 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="cb02901b-e5a6-4059-b49c-9011bfb481c9" path="/var/lib/kubelet/pods/cb02901b-e5a6-4059-b49c-9011bfb481c9/volumes" Nov 21 19:05:19 crc kubenswrapper[4701]: I1121 19:05:19.962813 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dde062d0-393b-4b35-80ec-c3f67c2a5129" path="/var/lib/kubelet/pods/dde062d0-393b-4b35-80ec-c3f67c2a5129/volumes" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.348615 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-7grnb" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.380980 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-ctfn6"] Nov 21 19:05:20 crc kubenswrapper[4701]: E1121 19:05:20.381416 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb02901b-e5a6-4059-b49c-9011bfb481c9" containerName="registry-server" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.381440 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb02901b-e5a6-4059-b49c-9011bfb481c9" containerName="registry-server" Nov 21 19:05:20 crc kubenswrapper[4701]: E1121 19:05:20.381455 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dde062d0-393b-4b35-80ec-c3f67c2a5129" containerName="registry-server" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.381462 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="dde062d0-393b-4b35-80ec-c3f67c2a5129" containerName="registry-server" Nov 21 19:05:20 crc kubenswrapper[4701]: E1121 19:05:20.381471 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3e75990-afff-41bb-a78e-3d04223bbb6c" containerName="marketplace-operator" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.381479 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3e75990-afff-41bb-a78e-3d04223bbb6c" containerName="marketplace-operator" Nov 21 19:05:20 crc kubenswrapper[4701]: E1121 19:05:20.381492 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90bb3a29-464a-4967-a009-d4fa6b92de73" containerName="extract-content" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.381498 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="90bb3a29-464a-4967-a009-d4fa6b92de73" containerName="extract-content" Nov 21 19:05:20 crc kubenswrapper[4701]: E1121 19:05:20.381506 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb02901b-e5a6-4059-b49c-9011bfb481c9" containerName="extract-utilities" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.381513 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb02901b-e5a6-4059-b49c-9011bfb481c9" containerName="extract-utilities" Nov 21 19:05:20 crc kubenswrapper[4701]: E1121 19:05:20.381526 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="795df5e4-162f-49a8-8316-2307c03a3f2d" containerName="extract-utilities" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.381532 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="795df5e4-162f-49a8-8316-2307c03a3f2d" containerName="extract-utilities" Nov 21 19:05:20 crc kubenswrapper[4701]: E1121 19:05:20.381546 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="795df5e4-162f-49a8-8316-2307c03a3f2d" containerName="registry-server" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.381556 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="795df5e4-162f-49a8-8316-2307c03a3f2d" containerName="registry-server" Nov 21 19:05:20 
crc kubenswrapper[4701]: E1121 19:05:20.381566 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dde062d0-393b-4b35-80ec-c3f67c2a5129" containerName="extract-utilities" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.381573 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="dde062d0-393b-4b35-80ec-c3f67c2a5129" containerName="extract-utilities" Nov 21 19:05:20 crc kubenswrapper[4701]: E1121 19:05:20.381611 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90bb3a29-464a-4967-a009-d4fa6b92de73" containerName="registry-server" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.381618 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="90bb3a29-464a-4967-a009-d4fa6b92de73" containerName="registry-server" Nov 21 19:05:20 crc kubenswrapper[4701]: E1121 19:05:20.381627 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dde062d0-393b-4b35-80ec-c3f67c2a5129" containerName="extract-content" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.381636 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="dde062d0-393b-4b35-80ec-c3f67c2a5129" containerName="extract-content" Nov 21 19:05:20 crc kubenswrapper[4701]: E1121 19:05:20.381643 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="795df5e4-162f-49a8-8316-2307c03a3f2d" containerName="extract-content" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.381650 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="795df5e4-162f-49a8-8316-2307c03a3f2d" containerName="extract-content" Nov 21 19:05:20 crc kubenswrapper[4701]: E1121 19:05:20.381658 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90bb3a29-464a-4967-a009-d4fa6b92de73" containerName="extract-utilities" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.381666 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="90bb3a29-464a-4967-a009-d4fa6b92de73" containerName="extract-utilities" Nov 21 19:05:20 crc kubenswrapper[4701]: E1121 19:05:20.381676 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb02901b-e5a6-4059-b49c-9011bfb481c9" containerName="extract-content" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.381687 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb02901b-e5a6-4059-b49c-9011bfb481c9" containerName="extract-content" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.381813 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3e75990-afff-41bb-a78e-3d04223bbb6c" containerName="marketplace-operator" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.381832 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb02901b-e5a6-4059-b49c-9011bfb481c9" containerName="registry-server" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.381841 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="795df5e4-162f-49a8-8316-2307c03a3f2d" containerName="registry-server" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.381848 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="90bb3a29-464a-4967-a009-d4fa6b92de73" containerName="registry-server" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.381860 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="dde062d0-393b-4b35-80ec-c3f67c2a5129" containerName="registry-server" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.382804 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ctfn6" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.386828 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.409937 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ctfn6"] Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.428021 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5c49x\" (UniqueName: \"kubernetes.io/projected/72f39205-4f40-45be-99f5-0036f0da7491-kube-api-access-5c49x\") pod \"redhat-marketplace-ctfn6\" (UID: \"72f39205-4f40-45be-99f5-0036f0da7491\") " pod="openshift-marketplace/redhat-marketplace-ctfn6" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.428088 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72f39205-4f40-45be-99f5-0036f0da7491-utilities\") pod \"redhat-marketplace-ctfn6\" (UID: \"72f39205-4f40-45be-99f5-0036f0da7491\") " pod="openshift-marketplace/redhat-marketplace-ctfn6" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.428145 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72f39205-4f40-45be-99f5-0036f0da7491-catalog-content\") pod \"redhat-marketplace-ctfn6\" (UID: \"72f39205-4f40-45be-99f5-0036f0da7491\") " pod="openshift-marketplace/redhat-marketplace-ctfn6" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.532169 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5c49x\" (UniqueName: \"kubernetes.io/projected/72f39205-4f40-45be-99f5-0036f0da7491-kube-api-access-5c49x\") pod \"redhat-marketplace-ctfn6\" (UID: \"72f39205-4f40-45be-99f5-0036f0da7491\") " pod="openshift-marketplace/redhat-marketplace-ctfn6" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.532658 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72f39205-4f40-45be-99f5-0036f0da7491-utilities\") pod \"redhat-marketplace-ctfn6\" (UID: \"72f39205-4f40-45be-99f5-0036f0da7491\") " pod="openshift-marketplace/redhat-marketplace-ctfn6" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.532698 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72f39205-4f40-45be-99f5-0036f0da7491-catalog-content\") pod \"redhat-marketplace-ctfn6\" (UID: \"72f39205-4f40-45be-99f5-0036f0da7491\") " pod="openshift-marketplace/redhat-marketplace-ctfn6" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.533244 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72f39205-4f40-45be-99f5-0036f0da7491-catalog-content\") pod \"redhat-marketplace-ctfn6\" (UID: \"72f39205-4f40-45be-99f5-0036f0da7491\") " pod="openshift-marketplace/redhat-marketplace-ctfn6" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.533345 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72f39205-4f40-45be-99f5-0036f0da7491-utilities\") pod \"redhat-marketplace-ctfn6\" (UID: 
\"72f39205-4f40-45be-99f5-0036f0da7491\") " pod="openshift-marketplace/redhat-marketplace-ctfn6" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.555916 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5c49x\" (UniqueName: \"kubernetes.io/projected/72f39205-4f40-45be-99f5-0036f0da7491-kube-api-access-5c49x\") pod \"redhat-marketplace-ctfn6\" (UID: \"72f39205-4f40-45be-99f5-0036f0da7491\") " pod="openshift-marketplace/redhat-marketplace-ctfn6" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.574001 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-tbtgz"] Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.574914 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tbtgz" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.577643 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.602762 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tbtgz"] Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.634426 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3e81175f-5aec-4176-b6ec-d4d292063f20-utilities\") pod \"redhat-operators-tbtgz\" (UID: \"3e81175f-5aec-4176-b6ec-d4d292063f20\") " pod="openshift-marketplace/redhat-operators-tbtgz" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.634493 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3e81175f-5aec-4176-b6ec-d4d292063f20-catalog-content\") pod \"redhat-operators-tbtgz\" (UID: \"3e81175f-5aec-4176-b6ec-d4d292063f20\") " pod="openshift-marketplace/redhat-operators-tbtgz" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.634590 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4d5mx\" (UniqueName: \"kubernetes.io/projected/3e81175f-5aec-4176-b6ec-d4d292063f20-kube-api-access-4d5mx\") pod \"redhat-operators-tbtgz\" (UID: \"3e81175f-5aec-4176-b6ec-d4d292063f20\") " pod="openshift-marketplace/redhat-operators-tbtgz" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.716082 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ctfn6" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.735530 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4d5mx\" (UniqueName: \"kubernetes.io/projected/3e81175f-5aec-4176-b6ec-d4d292063f20-kube-api-access-4d5mx\") pod \"redhat-operators-tbtgz\" (UID: \"3e81175f-5aec-4176-b6ec-d4d292063f20\") " pod="openshift-marketplace/redhat-operators-tbtgz" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.735654 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3e81175f-5aec-4176-b6ec-d4d292063f20-utilities\") pod \"redhat-operators-tbtgz\" (UID: \"3e81175f-5aec-4176-b6ec-d4d292063f20\") " pod="openshift-marketplace/redhat-operators-tbtgz" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.735735 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3e81175f-5aec-4176-b6ec-d4d292063f20-catalog-content\") pod \"redhat-operators-tbtgz\" (UID: \"3e81175f-5aec-4176-b6ec-d4d292063f20\") " pod="openshift-marketplace/redhat-operators-tbtgz" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.737136 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3e81175f-5aec-4176-b6ec-d4d292063f20-utilities\") pod \"redhat-operators-tbtgz\" (UID: \"3e81175f-5aec-4176-b6ec-d4d292063f20\") " pod="openshift-marketplace/redhat-operators-tbtgz" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.739907 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3e81175f-5aec-4176-b6ec-d4d292063f20-catalog-content\") pod \"redhat-operators-tbtgz\" (UID: \"3e81175f-5aec-4176-b6ec-d4d292063f20\") " pod="openshift-marketplace/redhat-operators-tbtgz" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.763334 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4d5mx\" (UniqueName: \"kubernetes.io/projected/3e81175f-5aec-4176-b6ec-d4d292063f20-kube-api-access-4d5mx\") pod \"redhat-operators-tbtgz\" (UID: \"3e81175f-5aec-4176-b6ec-d4d292063f20\") " pod="openshift-marketplace/redhat-operators-tbtgz" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.910924 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-tbtgz" Nov 21 19:05:20 crc kubenswrapper[4701]: I1121 19:05:20.948939 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ctfn6"] Nov 21 19:05:20 crc kubenswrapper[4701]: W1121 19:05:20.958948 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod72f39205_4f40_45be_99f5_0036f0da7491.slice/crio-d968aec7091466de8bedfcad47b6f433f19c9dea819d5464b6167761361267ba WatchSource:0}: Error finding container d968aec7091466de8bedfcad47b6f433f19c9dea819d5464b6167761361267ba: Status 404 returned error can't find the container with id d968aec7091466de8bedfcad47b6f433f19c9dea819d5464b6167761361267ba Nov 21 19:05:21 crc kubenswrapper[4701]: I1121 19:05:21.374003 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tbtgz"] Nov 21 19:05:21 crc kubenswrapper[4701]: I1121 19:05:21.374664 4701 generic.go:334] "Generic (PLEG): container finished" podID="72f39205-4f40-45be-99f5-0036f0da7491" containerID="1f3ee89392ab5cd9c328b27cd61e1d319a8e0b2527e59488650780bd3e1c370a" exitCode=0 Nov 21 19:05:21 crc kubenswrapper[4701]: I1121 19:05:21.375630 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ctfn6" event={"ID":"72f39205-4f40-45be-99f5-0036f0da7491","Type":"ContainerDied","Data":"1f3ee89392ab5cd9c328b27cd61e1d319a8e0b2527e59488650780bd3e1c370a"} Nov 21 19:05:21 crc kubenswrapper[4701]: I1121 19:05:21.375665 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ctfn6" event={"ID":"72f39205-4f40-45be-99f5-0036f0da7491","Type":"ContainerStarted","Data":"d968aec7091466de8bedfcad47b6f433f19c9dea819d5464b6167761361267ba"} Nov 21 19:05:22 crc kubenswrapper[4701]: I1121 19:05:22.396160 4701 generic.go:334] "Generic (PLEG): container finished" podID="3e81175f-5aec-4176-b6ec-d4d292063f20" containerID="f2dbea7ba1a4f1bcba0aad6d5fc0373735606bbbd99da229b097059186a926ef" exitCode=0 Nov 21 19:05:22 crc kubenswrapper[4701]: I1121 19:05:22.396315 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tbtgz" event={"ID":"3e81175f-5aec-4176-b6ec-d4d292063f20","Type":"ContainerDied","Data":"f2dbea7ba1a4f1bcba0aad6d5fc0373735606bbbd99da229b097059186a926ef"} Nov 21 19:05:22 crc kubenswrapper[4701]: I1121 19:05:22.396381 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tbtgz" event={"ID":"3e81175f-5aec-4176-b6ec-d4d292063f20","Type":"ContainerStarted","Data":"299cd65bd72ab778a8b865bb5c93229c227364a7f614778093be2e05d415fc21"} Nov 21 19:05:22 crc kubenswrapper[4701]: E1121 19:05:22.705594 4701 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod72f39205_4f40_45be_99f5_0036f0da7491.slice/crio-fd4ab0087b11aa3e80d61ca34108c609023c8290e2e0f29b7780d799cf48259f.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod72f39205_4f40_45be_99f5_0036f0da7491.slice/crio-conmon-fd4ab0087b11aa3e80d61ca34108c609023c8290e2e0f29b7780d799cf48259f.scope\": RecentStats: unable to find data in memory cache]" Nov 21 19:05:22 crc kubenswrapper[4701]: I1121 19:05:22.774915 4701 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/certified-operators-v9dtr"] Nov 21 19:05:22 crc kubenswrapper[4701]: I1121 19:05:22.777736 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-v9dtr" Nov 21 19:05:22 crc kubenswrapper[4701]: I1121 19:05:22.782775 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 21 19:05:22 crc kubenswrapper[4701]: I1121 19:05:22.787104 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-v9dtr"] Nov 21 19:05:22 crc kubenswrapper[4701]: I1121 19:05:22.865911 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08bf0aaf-b621-48f2-b2b1-c6939a9a3440-utilities\") pod \"certified-operators-v9dtr\" (UID: \"08bf0aaf-b621-48f2-b2b1-c6939a9a3440\") " pod="openshift-marketplace/certified-operators-v9dtr" Nov 21 19:05:22 crc kubenswrapper[4701]: I1121 19:05:22.866095 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08bf0aaf-b621-48f2-b2b1-c6939a9a3440-catalog-content\") pod \"certified-operators-v9dtr\" (UID: \"08bf0aaf-b621-48f2-b2b1-c6939a9a3440\") " pod="openshift-marketplace/certified-operators-v9dtr" Nov 21 19:05:22 crc kubenswrapper[4701]: I1121 19:05:22.866171 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xhdrz\" (UniqueName: \"kubernetes.io/projected/08bf0aaf-b621-48f2-b2b1-c6939a9a3440-kube-api-access-xhdrz\") pod \"certified-operators-v9dtr\" (UID: \"08bf0aaf-b621-48f2-b2b1-c6939a9a3440\") " pod="openshift-marketplace/certified-operators-v9dtr" Nov 21 19:05:22 crc kubenswrapper[4701]: I1121 19:05:22.969467 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08bf0aaf-b621-48f2-b2b1-c6939a9a3440-utilities\") pod \"certified-operators-v9dtr\" (UID: \"08bf0aaf-b621-48f2-b2b1-c6939a9a3440\") " pod="openshift-marketplace/certified-operators-v9dtr" Nov 21 19:05:22 crc kubenswrapper[4701]: I1121 19:05:22.968786 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08bf0aaf-b621-48f2-b2b1-c6939a9a3440-utilities\") pod \"certified-operators-v9dtr\" (UID: \"08bf0aaf-b621-48f2-b2b1-c6939a9a3440\") " pod="openshift-marketplace/certified-operators-v9dtr" Nov 21 19:05:22 crc kubenswrapper[4701]: I1121 19:05:22.970752 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08bf0aaf-b621-48f2-b2b1-c6939a9a3440-catalog-content\") pod \"certified-operators-v9dtr\" (UID: \"08bf0aaf-b621-48f2-b2b1-c6939a9a3440\") " pod="openshift-marketplace/certified-operators-v9dtr" Nov 21 19:05:22 crc kubenswrapper[4701]: I1121 19:05:22.970857 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xhdrz\" (UniqueName: \"kubernetes.io/projected/08bf0aaf-b621-48f2-b2b1-c6939a9a3440-kube-api-access-xhdrz\") pod \"certified-operators-v9dtr\" (UID: \"08bf0aaf-b621-48f2-b2b1-c6939a9a3440\") " pod="openshift-marketplace/certified-operators-v9dtr" Nov 21 19:05:22 crc kubenswrapper[4701]: I1121 19:05:22.972767 4701 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08bf0aaf-b621-48f2-b2b1-c6939a9a3440-catalog-content\") pod \"certified-operators-v9dtr\" (UID: \"08bf0aaf-b621-48f2-b2b1-c6939a9a3440\") " pod="openshift-marketplace/certified-operators-v9dtr" Nov 21 19:05:22 crc kubenswrapper[4701]: I1121 19:05:22.990391 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-2r5kn"] Nov 21 19:05:22 crc kubenswrapper[4701]: I1121 19:05:22.991857 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2r5kn" Nov 21 19:05:22 crc kubenswrapper[4701]: I1121 19:05:22.996058 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 21 19:05:23 crc kubenswrapper[4701]: I1121 19:05:23.012329 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xhdrz\" (UniqueName: \"kubernetes.io/projected/08bf0aaf-b621-48f2-b2b1-c6939a9a3440-kube-api-access-xhdrz\") pod \"certified-operators-v9dtr\" (UID: \"08bf0aaf-b621-48f2-b2b1-c6939a9a3440\") " pod="openshift-marketplace/certified-operators-v9dtr" Nov 21 19:05:23 crc kubenswrapper[4701]: I1121 19:05:23.019377 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2r5kn"] Nov 21 19:05:23 crc kubenswrapper[4701]: I1121 19:05:23.072732 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83bec736-5bd4-4889-b0fe-864eaa0fcb3a-catalog-content\") pod \"community-operators-2r5kn\" (UID: \"83bec736-5bd4-4889-b0fe-864eaa0fcb3a\") " pod="openshift-marketplace/community-operators-2r5kn" Nov 21 19:05:23 crc kubenswrapper[4701]: I1121 19:05:23.072798 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83bec736-5bd4-4889-b0fe-864eaa0fcb3a-utilities\") pod \"community-operators-2r5kn\" (UID: \"83bec736-5bd4-4889-b0fe-864eaa0fcb3a\") " pod="openshift-marketplace/community-operators-2r5kn" Nov 21 19:05:23 crc kubenswrapper[4701]: I1121 19:05:23.072917 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6dgmh\" (UniqueName: \"kubernetes.io/projected/83bec736-5bd4-4889-b0fe-864eaa0fcb3a-kube-api-access-6dgmh\") pod \"community-operators-2r5kn\" (UID: \"83bec736-5bd4-4889-b0fe-864eaa0fcb3a\") " pod="openshift-marketplace/community-operators-2r5kn" Nov 21 19:05:23 crc kubenswrapper[4701]: I1121 19:05:23.113835 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-v9dtr" Nov 21 19:05:23 crc kubenswrapper[4701]: I1121 19:05:23.174635 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6dgmh\" (UniqueName: \"kubernetes.io/projected/83bec736-5bd4-4889-b0fe-864eaa0fcb3a-kube-api-access-6dgmh\") pod \"community-operators-2r5kn\" (UID: \"83bec736-5bd4-4889-b0fe-864eaa0fcb3a\") " pod="openshift-marketplace/community-operators-2r5kn" Nov 21 19:05:23 crc kubenswrapper[4701]: I1121 19:05:23.174736 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83bec736-5bd4-4889-b0fe-864eaa0fcb3a-catalog-content\") pod \"community-operators-2r5kn\" (UID: \"83bec736-5bd4-4889-b0fe-864eaa0fcb3a\") " pod="openshift-marketplace/community-operators-2r5kn" Nov 21 19:05:23 crc kubenswrapper[4701]: I1121 19:05:23.174762 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83bec736-5bd4-4889-b0fe-864eaa0fcb3a-utilities\") pod \"community-operators-2r5kn\" (UID: \"83bec736-5bd4-4889-b0fe-864eaa0fcb3a\") " pod="openshift-marketplace/community-operators-2r5kn" Nov 21 19:05:23 crc kubenswrapper[4701]: I1121 19:05:23.175735 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83bec736-5bd4-4889-b0fe-864eaa0fcb3a-catalog-content\") pod \"community-operators-2r5kn\" (UID: \"83bec736-5bd4-4889-b0fe-864eaa0fcb3a\") " pod="openshift-marketplace/community-operators-2r5kn" Nov 21 19:05:23 crc kubenswrapper[4701]: I1121 19:05:23.175782 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83bec736-5bd4-4889-b0fe-864eaa0fcb3a-utilities\") pod \"community-operators-2r5kn\" (UID: \"83bec736-5bd4-4889-b0fe-864eaa0fcb3a\") " pod="openshift-marketplace/community-operators-2r5kn" Nov 21 19:05:23 crc kubenswrapper[4701]: I1121 19:05:23.195403 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6dgmh\" (UniqueName: \"kubernetes.io/projected/83bec736-5bd4-4889-b0fe-864eaa0fcb3a-kube-api-access-6dgmh\") pod \"community-operators-2r5kn\" (UID: \"83bec736-5bd4-4889-b0fe-864eaa0fcb3a\") " pod="openshift-marketplace/community-operators-2r5kn" Nov 21 19:05:23 crc kubenswrapper[4701]: I1121 19:05:23.354569 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-2r5kn" Nov 21 19:05:23 crc kubenswrapper[4701]: I1121 19:05:23.411996 4701 generic.go:334] "Generic (PLEG): container finished" podID="72f39205-4f40-45be-99f5-0036f0da7491" containerID="fd4ab0087b11aa3e80d61ca34108c609023c8290e2e0f29b7780d799cf48259f" exitCode=0 Nov 21 19:05:23 crc kubenswrapper[4701]: I1121 19:05:23.414921 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ctfn6" event={"ID":"72f39205-4f40-45be-99f5-0036f0da7491","Type":"ContainerDied","Data":"fd4ab0087b11aa3e80d61ca34108c609023c8290e2e0f29b7780d799cf48259f"} Nov 21 19:05:23 crc kubenswrapper[4701]: I1121 19:05:23.535472 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-v9dtr"] Nov 21 19:05:23 crc kubenswrapper[4701]: I1121 19:05:23.543125 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2r5kn"] Nov 21 19:05:24 crc kubenswrapper[4701]: I1121 19:05:24.422259 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ctfn6" event={"ID":"72f39205-4f40-45be-99f5-0036f0da7491","Type":"ContainerStarted","Data":"be0b5ed9ddca6d61b852e1d06874ee2aa67043299314439c4bbedd782eaf42ae"} Nov 21 19:05:24 crc kubenswrapper[4701]: I1121 19:05:24.424470 4701 generic.go:334] "Generic (PLEG): container finished" podID="3e81175f-5aec-4176-b6ec-d4d292063f20" containerID="b53d513325b1d50c3c919f26daff9f7ca3e94e1cc78756a0663e153d7c1e1dcb" exitCode=0 Nov 21 19:05:24 crc kubenswrapper[4701]: I1121 19:05:24.424591 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tbtgz" event={"ID":"3e81175f-5aec-4176-b6ec-d4d292063f20","Type":"ContainerDied","Data":"b53d513325b1d50c3c919f26daff9f7ca3e94e1cc78756a0663e153d7c1e1dcb"} Nov 21 19:05:24 crc kubenswrapper[4701]: I1121 19:05:24.426800 4701 generic.go:334] "Generic (PLEG): container finished" podID="83bec736-5bd4-4889-b0fe-864eaa0fcb3a" containerID="e08d08109527a58008428c7604cd809a66d2c5f515077e72280198f0527daef5" exitCode=0 Nov 21 19:05:24 crc kubenswrapper[4701]: I1121 19:05:24.426879 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2r5kn" event={"ID":"83bec736-5bd4-4889-b0fe-864eaa0fcb3a","Type":"ContainerDied","Data":"e08d08109527a58008428c7604cd809a66d2c5f515077e72280198f0527daef5"} Nov 21 19:05:24 crc kubenswrapper[4701]: I1121 19:05:24.426929 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2r5kn" event={"ID":"83bec736-5bd4-4889-b0fe-864eaa0fcb3a","Type":"ContainerStarted","Data":"dcdef3b3792aa32d3195d9c5aa2c5ca7a5f97ed45bb0014de1c1d87803116a1a"} Nov 21 19:05:24 crc kubenswrapper[4701]: I1121 19:05:24.430717 4701 generic.go:334] "Generic (PLEG): container finished" podID="08bf0aaf-b621-48f2-b2b1-c6939a9a3440" containerID="1277522ce0137a9a026c020d00d6cc7dca210e5f19f91987c7470217aded8e5b" exitCode=0 Nov 21 19:05:24 crc kubenswrapper[4701]: I1121 19:05:24.430771 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v9dtr" event={"ID":"08bf0aaf-b621-48f2-b2b1-c6939a9a3440","Type":"ContainerDied","Data":"1277522ce0137a9a026c020d00d6cc7dca210e5f19f91987c7470217aded8e5b"} Nov 21 19:05:24 crc kubenswrapper[4701]: I1121 19:05:24.430802 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v9dtr" 
event={"ID":"08bf0aaf-b621-48f2-b2b1-c6939a9a3440","Type":"ContainerStarted","Data":"7e54ebf6539c9e650a75489298c4a14cd8bf5a65c9286f6f0a4068fb4b86c0e5"} Nov 21 19:05:24 crc kubenswrapper[4701]: I1121 19:05:24.449008 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-ctfn6" podStartSLOduration=2.006565648 podStartE2EDuration="4.448981488s" podCreationTimestamp="2025-11-21 19:05:20 +0000 UTC" firstStartedPulling="2025-11-21 19:05:21.379925586 +0000 UTC m=+212.165065623" lastFinishedPulling="2025-11-21 19:05:23.822341426 +0000 UTC m=+214.607481463" observedRunningTime="2025-11-21 19:05:24.444452903 +0000 UTC m=+215.229592970" watchObservedRunningTime="2025-11-21 19:05:24.448981488 +0000 UTC m=+215.234121555" Nov 21 19:05:25 crc kubenswrapper[4701]: I1121 19:05:25.440180 4701 generic.go:334] "Generic (PLEG): container finished" podID="08bf0aaf-b621-48f2-b2b1-c6939a9a3440" containerID="9d920c4eed7b341bad720a19643e99d969f34f57ad6600fc59697edd3b428682" exitCode=0 Nov 21 19:05:25 crc kubenswrapper[4701]: I1121 19:05:25.440298 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v9dtr" event={"ID":"08bf0aaf-b621-48f2-b2b1-c6939a9a3440","Type":"ContainerDied","Data":"9d920c4eed7b341bad720a19643e99d969f34f57ad6600fc59697edd3b428682"} Nov 21 19:05:25 crc kubenswrapper[4701]: I1121 19:05:25.443362 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tbtgz" event={"ID":"3e81175f-5aec-4176-b6ec-d4d292063f20","Type":"ContainerStarted","Data":"12b99400e48e27f3fc653ee2f89115864d62aced07068843f065b4d0d58bc3eb"} Nov 21 19:05:25 crc kubenswrapper[4701]: I1121 19:05:25.484263 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-tbtgz" podStartSLOduration=3.01944496 podStartE2EDuration="5.484233383s" podCreationTimestamp="2025-11-21 19:05:20 +0000 UTC" firstStartedPulling="2025-11-21 19:05:22.399286856 +0000 UTC m=+213.184426893" lastFinishedPulling="2025-11-21 19:05:24.864075289 +0000 UTC m=+215.649215316" observedRunningTime="2025-11-21 19:05:25.482814204 +0000 UTC m=+216.267954231" watchObservedRunningTime="2025-11-21 19:05:25.484233383 +0000 UTC m=+216.269373410" Nov 21 19:05:26 crc kubenswrapper[4701]: I1121 19:05:26.460190 4701 generic.go:334] "Generic (PLEG): container finished" podID="83bec736-5bd4-4889-b0fe-864eaa0fcb3a" containerID="615a5dd21b6620c6b9d5187441674fdd10a3f6eb32e86ffff5cd2a6070e98af2" exitCode=0 Nov 21 19:05:26 crc kubenswrapper[4701]: I1121 19:05:26.460307 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2r5kn" event={"ID":"83bec736-5bd4-4889-b0fe-864eaa0fcb3a","Type":"ContainerDied","Data":"615a5dd21b6620c6b9d5187441674fdd10a3f6eb32e86ffff5cd2a6070e98af2"} Nov 21 19:05:26 crc kubenswrapper[4701]: I1121 19:05:26.466264 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v9dtr" event={"ID":"08bf0aaf-b621-48f2-b2b1-c6939a9a3440","Type":"ContainerStarted","Data":"628f47c896987f73e35c1803ac4cd7bde609b9966f9325adf41d9af1df9f4a40"} Nov 21 19:05:26 crc kubenswrapper[4701]: I1121 19:05:26.534466 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-v9dtr" podStartSLOduration=3.117948467 podStartE2EDuration="4.534440637s" podCreationTimestamp="2025-11-21 19:05:22 +0000 UTC" firstStartedPulling="2025-11-21 
19:05:24.435648483 +0000 UTC m=+215.220788550" lastFinishedPulling="2025-11-21 19:05:25.852140693 +0000 UTC m=+216.637280720" observedRunningTime="2025-11-21 19:05:26.531813355 +0000 UTC m=+217.316953372" watchObservedRunningTime="2025-11-21 19:05:26.534440637 +0000 UTC m=+217.319580664" Nov 21 19:05:28 crc kubenswrapper[4701]: I1121 19:05:28.496161 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2r5kn" event={"ID":"83bec736-5bd4-4889-b0fe-864eaa0fcb3a","Type":"ContainerStarted","Data":"608b7aee6058693a7a9e2a8ef6528da5409e967967d18bf479d61fea3e86d149"} Nov 21 19:05:30 crc kubenswrapper[4701]: I1121 19:05:30.716674 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-ctfn6" Nov 21 19:05:30 crc kubenswrapper[4701]: I1121 19:05:30.717240 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-ctfn6" Nov 21 19:05:30 crc kubenswrapper[4701]: I1121 19:05:30.793896 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-ctfn6" Nov 21 19:05:30 crc kubenswrapper[4701]: I1121 19:05:30.822788 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-2r5kn" podStartSLOduration=6.400477551 podStartE2EDuration="8.822758651s" podCreationTimestamp="2025-11-21 19:05:22 +0000 UTC" firstStartedPulling="2025-11-21 19:05:24.429681069 +0000 UTC m=+215.214821136" lastFinishedPulling="2025-11-21 19:05:26.851962209 +0000 UTC m=+217.637102236" observedRunningTime="2025-11-21 19:05:28.516643932 +0000 UTC m=+219.301783959" watchObservedRunningTime="2025-11-21 19:05:30.822758651 +0000 UTC m=+221.607898718" Nov 21 19:05:30 crc kubenswrapper[4701]: I1121 19:05:30.912233 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-tbtgz" Nov 21 19:05:30 crc kubenswrapper[4701]: I1121 19:05:30.912392 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-tbtgz" Nov 21 19:05:31 crc kubenswrapper[4701]: I1121 19:05:31.602488 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-ctfn6" Nov 21 19:05:31 crc kubenswrapper[4701]: I1121 19:05:31.964739 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tbtgz" podUID="3e81175f-5aec-4176-b6ec-d4d292063f20" containerName="registry-server" probeResult="failure" output=< Nov 21 19:05:31 crc kubenswrapper[4701]: timeout: failed to connect service ":50051" within 1s Nov 21 19:05:31 crc kubenswrapper[4701]: > Nov 21 19:05:33 crc kubenswrapper[4701]: I1121 19:05:33.115045 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-v9dtr" Nov 21 19:05:33 crc kubenswrapper[4701]: I1121 19:05:33.115420 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-v9dtr" Nov 21 19:05:33 crc kubenswrapper[4701]: I1121 19:05:33.176248 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-v9dtr" Nov 21 19:05:33 crc kubenswrapper[4701]: I1121 19:05:33.354835 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-2r5kn" Nov 21 
19:05:33 crc kubenswrapper[4701]: I1121 19:05:33.355301 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-2r5kn" Nov 21 19:05:33 crc kubenswrapper[4701]: I1121 19:05:33.394532 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-2r5kn" Nov 21 19:05:33 crc kubenswrapper[4701]: I1121 19:05:33.579837 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-2r5kn" Nov 21 19:05:33 crc kubenswrapper[4701]: I1121 19:05:33.580272 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-v9dtr" Nov 21 19:05:40 crc kubenswrapper[4701]: I1121 19:05:40.969022 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-tbtgz" Nov 21 19:05:41 crc kubenswrapper[4701]: I1121 19:05:41.007593 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-tbtgz" Nov 21 19:07:18 crc kubenswrapper[4701]: I1121 19:07:18.613248 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 19:07:18 crc kubenswrapper[4701]: I1121 19:07:18.614154 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 19:07:48 crc kubenswrapper[4701]: I1121 19:07:48.613844 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 19:07:48 crc kubenswrapper[4701]: I1121 19:07:48.614717 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 19:08:18 crc kubenswrapper[4701]: I1121 19:08:18.614152 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 19:08:18 crc kubenswrapper[4701]: I1121 19:08:18.614900 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 19:08:18 crc kubenswrapper[4701]: I1121 19:08:18.615018 4701 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" 
pod="openshift-machine-config-operator/machine-config-daemon-tbszf" Nov 21 19:08:18 crc kubenswrapper[4701]: I1121 19:08:18.617157 4701 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4588b2a736d568e8f69ecdacc0ee6977f154eb82e175accdf7d81cf19a181fd6"} pod="openshift-machine-config-operator/machine-config-daemon-tbszf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 19:08:18 crc kubenswrapper[4701]: I1121 19:08:18.617312 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" containerID="cri-o://4588b2a736d568e8f69ecdacc0ee6977f154eb82e175accdf7d81cf19a181fd6" gracePeriod=600 Nov 21 19:08:18 crc kubenswrapper[4701]: I1121 19:08:18.818078 4701 generic.go:334] "Generic (PLEG): container finished" podID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerID="4588b2a736d568e8f69ecdacc0ee6977f154eb82e175accdf7d81cf19a181fd6" exitCode=0 Nov 21 19:08:18 crc kubenswrapper[4701]: I1121 19:08:18.818224 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerDied","Data":"4588b2a736d568e8f69ecdacc0ee6977f154eb82e175accdf7d81cf19a181fd6"} Nov 21 19:08:18 crc kubenswrapper[4701]: I1121 19:08:18.818412 4701 scope.go:117] "RemoveContainer" containerID="d2e81ee034439f66ef1d2da671f5fac04d3fcf9417df59f84be9db3c9e966322" Nov 21 19:08:19 crc kubenswrapper[4701]: I1121 19:08:19.830163 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerStarted","Data":"af4d914cc7c263c798f4370559a31981e2c52301881b123a393037c80c3da1f8"} Nov 21 19:08:26 crc kubenswrapper[4701]: I1121 19:08:26.757408 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-s2x44"] Nov 21 19:08:26 crc kubenswrapper[4701]: I1121 19:08:26.759004 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-s2x44" Nov 21 19:08:26 crc kubenswrapper[4701]: I1121 19:08:26.778884 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-s2x44"] Nov 21 19:08:26 crc kubenswrapper[4701]: I1121 19:08:26.859947 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/26b3c7b0-75ed-4bb3-8661-d1d71ee7957e-registry-certificates\") pod \"image-registry-66df7c8f76-s2x44\" (UID: \"26b3c7b0-75ed-4bb3-8661-d1d71ee7957e\") " pod="openshift-image-registry/image-registry-66df7c8f76-s2x44" Nov 21 19:08:26 crc kubenswrapper[4701]: I1121 19:08:26.860065 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-s2x44\" (UID: \"26b3c7b0-75ed-4bb3-8661-d1d71ee7957e\") " pod="openshift-image-registry/image-registry-66df7c8f76-s2x44" Nov 21 19:08:26 crc kubenswrapper[4701]: I1121 19:08:26.860168 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/26b3c7b0-75ed-4bb3-8661-d1d71ee7957e-installation-pull-secrets\") pod \"image-registry-66df7c8f76-s2x44\" (UID: \"26b3c7b0-75ed-4bb3-8661-d1d71ee7957e\") " pod="openshift-image-registry/image-registry-66df7c8f76-s2x44" Nov 21 19:08:26 crc kubenswrapper[4701]: I1121 19:08:26.860259 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/26b3c7b0-75ed-4bb3-8661-d1d71ee7957e-registry-tls\") pod \"image-registry-66df7c8f76-s2x44\" (UID: \"26b3c7b0-75ed-4bb3-8661-d1d71ee7957e\") " pod="openshift-image-registry/image-registry-66df7c8f76-s2x44" Nov 21 19:08:26 crc kubenswrapper[4701]: I1121 19:08:26.860357 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/26b3c7b0-75ed-4bb3-8661-d1d71ee7957e-bound-sa-token\") pod \"image-registry-66df7c8f76-s2x44\" (UID: \"26b3c7b0-75ed-4bb3-8661-d1d71ee7957e\") " pod="openshift-image-registry/image-registry-66df7c8f76-s2x44" Nov 21 19:08:26 crc kubenswrapper[4701]: I1121 19:08:26.860668 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/26b3c7b0-75ed-4bb3-8661-d1d71ee7957e-ca-trust-extracted\") pod \"image-registry-66df7c8f76-s2x44\" (UID: \"26b3c7b0-75ed-4bb3-8661-d1d71ee7957e\") " pod="openshift-image-registry/image-registry-66df7c8f76-s2x44" Nov 21 19:08:26 crc kubenswrapper[4701]: I1121 19:08:26.860781 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/26b3c7b0-75ed-4bb3-8661-d1d71ee7957e-trusted-ca\") pod \"image-registry-66df7c8f76-s2x44\" (UID: \"26b3c7b0-75ed-4bb3-8661-d1d71ee7957e\") " pod="openshift-image-registry/image-registry-66df7c8f76-s2x44" Nov 21 19:08:26 crc kubenswrapper[4701]: I1121 19:08:26.860879 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xnf6r\" (UniqueName: 
\"kubernetes.io/projected/26b3c7b0-75ed-4bb3-8661-d1d71ee7957e-kube-api-access-xnf6r\") pod \"image-registry-66df7c8f76-s2x44\" (UID: \"26b3c7b0-75ed-4bb3-8661-d1d71ee7957e\") " pod="openshift-image-registry/image-registry-66df7c8f76-s2x44" Nov 21 19:08:26 crc kubenswrapper[4701]: I1121 19:08:26.916973 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-s2x44\" (UID: \"26b3c7b0-75ed-4bb3-8661-d1d71ee7957e\") " pod="openshift-image-registry/image-registry-66df7c8f76-s2x44" Nov 21 19:08:26 crc kubenswrapper[4701]: I1121 19:08:26.966786 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/26b3c7b0-75ed-4bb3-8661-d1d71ee7957e-bound-sa-token\") pod \"image-registry-66df7c8f76-s2x44\" (UID: \"26b3c7b0-75ed-4bb3-8661-d1d71ee7957e\") " pod="openshift-image-registry/image-registry-66df7c8f76-s2x44" Nov 21 19:08:26 crc kubenswrapper[4701]: I1121 19:08:26.966866 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/26b3c7b0-75ed-4bb3-8661-d1d71ee7957e-ca-trust-extracted\") pod \"image-registry-66df7c8f76-s2x44\" (UID: \"26b3c7b0-75ed-4bb3-8661-d1d71ee7957e\") " pod="openshift-image-registry/image-registry-66df7c8f76-s2x44" Nov 21 19:08:26 crc kubenswrapper[4701]: I1121 19:08:26.966930 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/26b3c7b0-75ed-4bb3-8661-d1d71ee7957e-trusted-ca\") pod \"image-registry-66df7c8f76-s2x44\" (UID: \"26b3c7b0-75ed-4bb3-8661-d1d71ee7957e\") " pod="openshift-image-registry/image-registry-66df7c8f76-s2x44" Nov 21 19:08:26 crc kubenswrapper[4701]: I1121 19:08:26.967000 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xnf6r\" (UniqueName: \"kubernetes.io/projected/26b3c7b0-75ed-4bb3-8661-d1d71ee7957e-kube-api-access-xnf6r\") pod \"image-registry-66df7c8f76-s2x44\" (UID: \"26b3c7b0-75ed-4bb3-8661-d1d71ee7957e\") " pod="openshift-image-registry/image-registry-66df7c8f76-s2x44" Nov 21 19:08:26 crc kubenswrapper[4701]: I1121 19:08:26.967105 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/26b3c7b0-75ed-4bb3-8661-d1d71ee7957e-registry-certificates\") pod \"image-registry-66df7c8f76-s2x44\" (UID: \"26b3c7b0-75ed-4bb3-8661-d1d71ee7957e\") " pod="openshift-image-registry/image-registry-66df7c8f76-s2x44" Nov 21 19:08:26 crc kubenswrapper[4701]: I1121 19:08:26.967251 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/26b3c7b0-75ed-4bb3-8661-d1d71ee7957e-installation-pull-secrets\") pod \"image-registry-66df7c8f76-s2x44\" (UID: \"26b3c7b0-75ed-4bb3-8661-d1d71ee7957e\") " pod="openshift-image-registry/image-registry-66df7c8f76-s2x44" Nov 21 19:08:26 crc kubenswrapper[4701]: I1121 19:08:26.967321 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/26b3c7b0-75ed-4bb3-8661-d1d71ee7957e-registry-tls\") pod \"image-registry-66df7c8f76-s2x44\" (UID: \"26b3c7b0-75ed-4bb3-8661-d1d71ee7957e\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-s2x44" Nov 21 19:08:26 crc kubenswrapper[4701]: I1121 19:08:26.971497 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/26b3c7b0-75ed-4bb3-8661-d1d71ee7957e-trusted-ca\") pod \"image-registry-66df7c8f76-s2x44\" (UID: \"26b3c7b0-75ed-4bb3-8661-d1d71ee7957e\") " pod="openshift-image-registry/image-registry-66df7c8f76-s2x44" Nov 21 19:08:26 crc kubenswrapper[4701]: I1121 19:08:26.971619 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/26b3c7b0-75ed-4bb3-8661-d1d71ee7957e-registry-certificates\") pod \"image-registry-66df7c8f76-s2x44\" (UID: \"26b3c7b0-75ed-4bb3-8661-d1d71ee7957e\") " pod="openshift-image-registry/image-registry-66df7c8f76-s2x44" Nov 21 19:08:26 crc kubenswrapper[4701]: I1121 19:08:26.971848 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/26b3c7b0-75ed-4bb3-8661-d1d71ee7957e-ca-trust-extracted\") pod \"image-registry-66df7c8f76-s2x44\" (UID: \"26b3c7b0-75ed-4bb3-8661-d1d71ee7957e\") " pod="openshift-image-registry/image-registry-66df7c8f76-s2x44" Nov 21 19:08:26 crc kubenswrapper[4701]: I1121 19:08:26.977911 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/26b3c7b0-75ed-4bb3-8661-d1d71ee7957e-registry-tls\") pod \"image-registry-66df7c8f76-s2x44\" (UID: \"26b3c7b0-75ed-4bb3-8661-d1d71ee7957e\") " pod="openshift-image-registry/image-registry-66df7c8f76-s2x44" Nov 21 19:08:26 crc kubenswrapper[4701]: I1121 19:08:26.983648 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/26b3c7b0-75ed-4bb3-8661-d1d71ee7957e-installation-pull-secrets\") pod \"image-registry-66df7c8f76-s2x44\" (UID: \"26b3c7b0-75ed-4bb3-8661-d1d71ee7957e\") " pod="openshift-image-registry/image-registry-66df7c8f76-s2x44" Nov 21 19:08:26 crc kubenswrapper[4701]: I1121 19:08:26.989767 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/26b3c7b0-75ed-4bb3-8661-d1d71ee7957e-bound-sa-token\") pod \"image-registry-66df7c8f76-s2x44\" (UID: \"26b3c7b0-75ed-4bb3-8661-d1d71ee7957e\") " pod="openshift-image-registry/image-registry-66df7c8f76-s2x44" Nov 21 19:08:27 crc kubenswrapper[4701]: I1121 19:08:27.004495 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xnf6r\" (UniqueName: \"kubernetes.io/projected/26b3c7b0-75ed-4bb3-8661-d1d71ee7957e-kube-api-access-xnf6r\") pod \"image-registry-66df7c8f76-s2x44\" (UID: \"26b3c7b0-75ed-4bb3-8661-d1d71ee7957e\") " pod="openshift-image-registry/image-registry-66df7c8f76-s2x44" Nov 21 19:08:27 crc kubenswrapper[4701]: I1121 19:08:27.078277 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-s2x44" Nov 21 19:08:27 crc kubenswrapper[4701]: I1121 19:08:27.283874 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-s2x44"] Nov 21 19:08:27 crc kubenswrapper[4701]: I1121 19:08:27.889051 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-s2x44" event={"ID":"26b3c7b0-75ed-4bb3-8661-d1d71ee7957e","Type":"ContainerStarted","Data":"a85bdb2fc7943b6eec7ebdd619e940401cd5eab56856f66b595faa6fce1065cf"} Nov 21 19:08:27 crc kubenswrapper[4701]: I1121 19:08:27.889477 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-s2x44" event={"ID":"26b3c7b0-75ed-4bb3-8661-d1d71ee7957e","Type":"ContainerStarted","Data":"dcc5d385384801ae7b57a88636f7252fbdcade06469252824673454c9ad63e39"} Nov 21 19:08:27 crc kubenswrapper[4701]: I1121 19:08:27.889509 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-s2x44" Nov 21 19:08:27 crc kubenswrapper[4701]: I1121 19:08:27.920182 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-s2x44" podStartSLOduration=1.920160984 podStartE2EDuration="1.920160984s" podCreationTimestamp="2025-11-21 19:08:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:08:27.918113569 +0000 UTC m=+398.703253626" watchObservedRunningTime="2025-11-21 19:08:27.920160984 +0000 UTC m=+398.705301011" Nov 21 19:08:47 crc kubenswrapper[4701]: I1121 19:08:47.086824 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-s2x44" Nov 21 19:08:47 crc kubenswrapper[4701]: I1121 19:08:47.163819 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-wzsrk"] Nov 21 19:09:12 crc kubenswrapper[4701]: I1121 19:09:12.231460 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" podUID="71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1" containerName="registry" containerID="cri-o://7281301eeb0935fddb9b6bde6ce27d101499ebcdf9cb75abe9635f22a3174dbc" gracePeriod=30 Nov 21 19:09:12 crc kubenswrapper[4701]: I1121 19:09:12.681030 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:09:12 crc kubenswrapper[4701]: I1121 19:09:12.788973 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-registry-tls\") pod \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " Nov 21 19:09:12 crc kubenswrapper[4701]: I1121 19:09:12.789138 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-trusted-ca\") pod \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " Nov 21 19:09:12 crc kubenswrapper[4701]: I1121 19:09:12.789387 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-ca-trust-extracted\") pod \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " Nov 21 19:09:12 crc kubenswrapper[4701]: I1121 19:09:12.789464 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-installation-pull-secrets\") pod \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " Nov 21 19:09:12 crc kubenswrapper[4701]: I1121 19:09:12.789686 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " Nov 21 19:09:12 crc kubenswrapper[4701]: I1121 19:09:12.789833 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-whtlr\" (UniqueName: \"kubernetes.io/projected/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-kube-api-access-whtlr\") pod \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " Nov 21 19:09:12 crc kubenswrapper[4701]: I1121 19:09:12.790710 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:09:12 crc kubenswrapper[4701]: I1121 19:09:12.790826 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-bound-sa-token\") pod \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " Nov 21 19:09:12 crc kubenswrapper[4701]: I1121 19:09:12.790894 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-registry-certificates\") pod \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\" (UID: \"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1\") " Nov 21 19:09:12 crc kubenswrapper[4701]: I1121 19:09:12.791619 4701 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 21 19:09:12 crc kubenswrapper[4701]: I1121 19:09:12.792542 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:09:12 crc kubenswrapper[4701]: I1121 19:09:12.797771 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:09:12 crc kubenswrapper[4701]: I1121 19:09:12.797857 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:09:12 crc kubenswrapper[4701]: I1121 19:09:12.798719 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:09:12 crc kubenswrapper[4701]: I1121 19:09:12.799522 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-kube-api-access-whtlr" (OuterVolumeSpecName: "kube-api-access-whtlr") pod "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1"). InnerVolumeSpecName "kube-api-access-whtlr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:09:12 crc kubenswrapper[4701]: I1121 19:09:12.809761 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 21 19:09:12 crc kubenswrapper[4701]: I1121 19:09:12.823042 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1" (UID: "71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:09:12 crc kubenswrapper[4701]: I1121 19:09:12.892882 4701 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 21 19:09:12 crc kubenswrapper[4701]: I1121 19:09:12.892930 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-whtlr\" (UniqueName: \"kubernetes.io/projected/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-kube-api-access-whtlr\") on node \"crc\" DevicePath \"\"" Nov 21 19:09:12 crc kubenswrapper[4701]: I1121 19:09:12.892947 4701 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 21 19:09:12 crc kubenswrapper[4701]: I1121 19:09:12.892960 4701 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 21 19:09:12 crc kubenswrapper[4701]: I1121 19:09:12.892975 4701 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 21 19:09:12 crc kubenswrapper[4701]: I1121 19:09:12.892986 4701 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 21 19:09:13 crc kubenswrapper[4701]: I1121 19:09:13.222495 4701 generic.go:334] "Generic (PLEG): container finished" podID="71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1" containerID="7281301eeb0935fddb9b6bde6ce27d101499ebcdf9cb75abe9635f22a3174dbc" exitCode=0 Nov 21 19:09:13 crc kubenswrapper[4701]: I1121 19:09:13.222584 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" event={"ID":"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1","Type":"ContainerDied","Data":"7281301eeb0935fddb9b6bde6ce27d101499ebcdf9cb75abe9635f22a3174dbc"} Nov 21 19:09:13 crc kubenswrapper[4701]: I1121 19:09:13.222612 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" Nov 21 19:09:13 crc kubenswrapper[4701]: I1121 19:09:13.222652 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-wzsrk" event={"ID":"71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1","Type":"ContainerDied","Data":"b972d7d6b0636f43a9e54dd7abb163e4e545adb553b4ed10af6f216c3ec8b610"} Nov 21 19:09:13 crc kubenswrapper[4701]: I1121 19:09:13.222697 4701 scope.go:117] "RemoveContainer" containerID="7281301eeb0935fddb9b6bde6ce27d101499ebcdf9cb75abe9635f22a3174dbc" Nov 21 19:09:13 crc kubenswrapper[4701]: I1121 19:09:13.254111 4701 scope.go:117] "RemoveContainer" containerID="7281301eeb0935fddb9b6bde6ce27d101499ebcdf9cb75abe9635f22a3174dbc" Nov 21 19:09:13 crc kubenswrapper[4701]: E1121 19:09:13.255765 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7281301eeb0935fddb9b6bde6ce27d101499ebcdf9cb75abe9635f22a3174dbc\": container with ID starting with 7281301eeb0935fddb9b6bde6ce27d101499ebcdf9cb75abe9635f22a3174dbc not found: ID does not exist" containerID="7281301eeb0935fddb9b6bde6ce27d101499ebcdf9cb75abe9635f22a3174dbc" Nov 21 19:09:13 crc kubenswrapper[4701]: I1121 19:09:13.255832 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7281301eeb0935fddb9b6bde6ce27d101499ebcdf9cb75abe9635f22a3174dbc"} err="failed to get container status \"7281301eeb0935fddb9b6bde6ce27d101499ebcdf9cb75abe9635f22a3174dbc\": rpc error: code = NotFound desc = could not find container \"7281301eeb0935fddb9b6bde6ce27d101499ebcdf9cb75abe9635f22a3174dbc\": container with ID starting with 7281301eeb0935fddb9b6bde6ce27d101499ebcdf9cb75abe9635f22a3174dbc not found: ID does not exist" Nov 21 19:09:13 crc kubenswrapper[4701]: I1121 19:09:13.283437 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-wzsrk"] Nov 21 19:09:13 crc kubenswrapper[4701]: I1121 19:09:13.294312 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-wzsrk"] Nov 21 19:09:13 crc kubenswrapper[4701]: I1121 19:09:13.964630 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1" path="/var/lib/kubelet/pods/71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1/volumes" Nov 21 19:10:18 crc kubenswrapper[4701]: I1121 19:10:18.613627 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 19:10:18 crc kubenswrapper[4701]: I1121 19:10:18.614540 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 19:10:48 crc kubenswrapper[4701]: I1121 19:10:48.613618 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 
21 19:10:48 crc kubenswrapper[4701]: I1121 19:10:48.616263 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 19:11:02 crc kubenswrapper[4701]: I1121 19:11:02.659720 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-2qrjs"] Nov 21 19:11:02 crc kubenswrapper[4701]: E1121 19:11:02.660933 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1" containerName="registry" Nov 21 19:11:02 crc kubenswrapper[4701]: I1121 19:11:02.660958 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1" containerName="registry" Nov 21 19:11:02 crc kubenswrapper[4701]: I1121 19:11:02.661105 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="71bb1fe1-6eaf-4e7f-922e-ce742d9d38b1" containerName="registry" Nov 21 19:11:02 crc kubenswrapper[4701]: I1121 19:11:02.661741 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-2qrjs" Nov 21 19:11:02 crc kubenswrapper[4701]: I1121 19:11:02.665295 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Nov 21 19:11:02 crc kubenswrapper[4701]: I1121 19:11:02.665670 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Nov 21 19:11:02 crc kubenswrapper[4701]: I1121 19:11:02.665924 4701 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-q459t" Nov 21 19:11:02 crc kubenswrapper[4701]: I1121 19:11:02.667018 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-868vc"] Nov 21 19:11:02 crc kubenswrapper[4701]: I1121 19:11:02.668374 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-868vc" Nov 21 19:11:02 crc kubenswrapper[4701]: I1121 19:11:02.669841 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-2qrjs"] Nov 21 19:11:02 crc kubenswrapper[4701]: I1121 19:11:02.671614 4701 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-mqcs4" Nov 21 19:11:02 crc kubenswrapper[4701]: I1121 19:11:02.682548 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-868vc"] Nov 21 19:11:02 crc kubenswrapper[4701]: I1121 19:11:02.711493 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-z4fmm"] Nov 21 19:11:02 crc kubenswrapper[4701]: I1121 19:11:02.712381 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-z4fmm" Nov 21 19:11:02 crc kubenswrapper[4701]: I1121 19:11:02.714443 4701 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-v4wgw" Nov 21 19:11:02 crc kubenswrapper[4701]: I1121 19:11:02.726068 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-z4fmm"] Nov 21 19:11:02 crc kubenswrapper[4701]: I1121 19:11:02.729686 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8x7ft\" (UniqueName: \"kubernetes.io/projected/325ae061-87b1-4272-a2d5-29a4fcf689f2-kube-api-access-8x7ft\") pod \"cert-manager-cainjector-7f985d654d-2qrjs\" (UID: \"325ae061-87b1-4272-a2d5-29a4fcf689f2\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-2qrjs" Nov 21 19:11:02 crc kubenswrapper[4701]: I1121 19:11:02.729811 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vw87w\" (UniqueName: \"kubernetes.io/projected/0da0430e-e5cb-465f-8e96-49906f8c0965-kube-api-access-vw87w\") pod \"cert-manager-5b446d88c5-868vc\" (UID: \"0da0430e-e5cb-465f-8e96-49906f8c0965\") " pod="cert-manager/cert-manager-5b446d88c5-868vc" Nov 21 19:11:02 crc kubenswrapper[4701]: I1121 19:11:02.831573 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vw87w\" (UniqueName: \"kubernetes.io/projected/0da0430e-e5cb-465f-8e96-49906f8c0965-kube-api-access-vw87w\") pod \"cert-manager-5b446d88c5-868vc\" (UID: \"0da0430e-e5cb-465f-8e96-49906f8c0965\") " pod="cert-manager/cert-manager-5b446d88c5-868vc" Nov 21 19:11:02 crc kubenswrapper[4701]: I1121 19:11:02.831688 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8x7ft\" (UniqueName: \"kubernetes.io/projected/325ae061-87b1-4272-a2d5-29a4fcf689f2-kube-api-access-8x7ft\") pod \"cert-manager-cainjector-7f985d654d-2qrjs\" (UID: \"325ae061-87b1-4272-a2d5-29a4fcf689f2\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-2qrjs" Nov 21 19:11:02 crc kubenswrapper[4701]: I1121 19:11:02.831736 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ng7nf\" (UniqueName: \"kubernetes.io/projected/e8e88792-2751-4fcc-b8b0-dd03328e12b8-kube-api-access-ng7nf\") pod \"cert-manager-webhook-5655c58dd6-z4fmm\" (UID: \"e8e88792-2751-4fcc-b8b0-dd03328e12b8\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-z4fmm" Nov 21 19:11:02 crc kubenswrapper[4701]: I1121 19:11:02.856405 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vw87w\" (UniqueName: \"kubernetes.io/projected/0da0430e-e5cb-465f-8e96-49906f8c0965-kube-api-access-vw87w\") pod \"cert-manager-5b446d88c5-868vc\" (UID: \"0da0430e-e5cb-465f-8e96-49906f8c0965\") " pod="cert-manager/cert-manager-5b446d88c5-868vc" Nov 21 19:11:02 crc kubenswrapper[4701]: I1121 19:11:02.857658 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8x7ft\" (UniqueName: \"kubernetes.io/projected/325ae061-87b1-4272-a2d5-29a4fcf689f2-kube-api-access-8x7ft\") pod \"cert-manager-cainjector-7f985d654d-2qrjs\" (UID: \"325ae061-87b1-4272-a2d5-29a4fcf689f2\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-2qrjs" Nov 21 19:11:02 crc kubenswrapper[4701]: I1121 19:11:02.933892 4701 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"kube-api-access-ng7nf\" (UniqueName: \"kubernetes.io/projected/e8e88792-2751-4fcc-b8b0-dd03328e12b8-kube-api-access-ng7nf\") pod \"cert-manager-webhook-5655c58dd6-z4fmm\" (UID: \"e8e88792-2751-4fcc-b8b0-dd03328e12b8\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-z4fmm" Nov 21 19:11:02 crc kubenswrapper[4701]: I1121 19:11:02.951165 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ng7nf\" (UniqueName: \"kubernetes.io/projected/e8e88792-2751-4fcc-b8b0-dd03328e12b8-kube-api-access-ng7nf\") pod \"cert-manager-webhook-5655c58dd6-z4fmm\" (UID: \"e8e88792-2751-4fcc-b8b0-dd03328e12b8\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-z4fmm" Nov 21 19:11:02 crc kubenswrapper[4701]: I1121 19:11:02.999122 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-2qrjs" Nov 21 19:11:03 crc kubenswrapper[4701]: I1121 19:11:03.006306 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-868vc" Nov 21 19:11:03 crc kubenswrapper[4701]: I1121 19:11:03.036626 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-z4fmm" Nov 21 19:11:03 crc kubenswrapper[4701]: I1121 19:11:03.337801 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-868vc"] Nov 21 19:11:03 crc kubenswrapper[4701]: I1121 19:11:03.348001 4701 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 19:11:03 crc kubenswrapper[4701]: I1121 19:11:03.378617 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-2qrjs"] Nov 21 19:11:03 crc kubenswrapper[4701]: I1121 19:11:03.414999 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-z4fmm"] Nov 21 19:11:03 crc kubenswrapper[4701]: W1121 19:11:03.418508 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode8e88792_2751_4fcc_b8b0_dd03328e12b8.slice/crio-979d4bc533aea6d890bc477513a9c774e81165e8a9aec2a2592fd84c4809f1b6 WatchSource:0}: Error finding container 979d4bc533aea6d890bc477513a9c774e81165e8a9aec2a2592fd84c4809f1b6: Status 404 returned error can't find the container with id 979d4bc533aea6d890bc477513a9c774e81165e8a9aec2a2592fd84c4809f1b6 Nov 21 19:11:04 crc kubenswrapper[4701]: I1121 19:11:04.029328 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-z4fmm" event={"ID":"e8e88792-2751-4fcc-b8b0-dd03328e12b8","Type":"ContainerStarted","Data":"979d4bc533aea6d890bc477513a9c774e81165e8a9aec2a2592fd84c4809f1b6"} Nov 21 19:11:04 crc kubenswrapper[4701]: I1121 19:11:04.033470 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-2qrjs" event={"ID":"325ae061-87b1-4272-a2d5-29a4fcf689f2","Type":"ContainerStarted","Data":"04728db87ddfd428d410e77737407910e8f26c7ee2a2ee150e90cd1315dc1a2a"} Nov 21 19:11:04 crc kubenswrapper[4701]: I1121 19:11:04.035242 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-868vc" event={"ID":"0da0430e-e5cb-465f-8e96-49906f8c0965","Type":"ContainerStarted","Data":"4be13c8bf5d56c3de97622f769110a0d50540b91619587dae8be38914a9ebc90"} Nov 21 19:11:08 crc kubenswrapper[4701]: I1121 
19:11:08.068619 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-2qrjs" event={"ID":"325ae061-87b1-4272-a2d5-29a4fcf689f2","Type":"ContainerStarted","Data":"ade50c4d6e6bca5ac6ec7ca8dce1b62e4403d1db6bdcf7841b8927df5db00524"} Nov 21 19:11:08 crc kubenswrapper[4701]: I1121 19:11:08.071807 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-868vc" event={"ID":"0da0430e-e5cb-465f-8e96-49906f8c0965","Type":"ContainerStarted","Data":"d1ef8ae78ac50c50585ccc8078afd61d5177047077abccf3932960fc762e5db8"} Nov 21 19:11:08 crc kubenswrapper[4701]: I1121 19:11:08.074485 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-z4fmm" event={"ID":"e8e88792-2751-4fcc-b8b0-dd03328e12b8","Type":"ContainerStarted","Data":"9a4fc38f70659e0143960d0772b1b83666710dcb8b285f050a89c4b865c66f3e"} Nov 21 19:11:08 crc kubenswrapper[4701]: I1121 19:11:08.074731 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-z4fmm" Nov 21 19:11:08 crc kubenswrapper[4701]: I1121 19:11:08.095758 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-2qrjs" podStartSLOduration=2.540855518 podStartE2EDuration="6.095730475s" podCreationTimestamp="2025-11-21 19:11:02 +0000 UTC" firstStartedPulling="2025-11-21 19:11:03.385746081 +0000 UTC m=+554.170886108" lastFinishedPulling="2025-11-21 19:11:06.940621028 +0000 UTC m=+557.725761065" observedRunningTime="2025-11-21 19:11:08.094979215 +0000 UTC m=+558.880119282" watchObservedRunningTime="2025-11-21 19:11:08.095730475 +0000 UTC m=+558.880870512" Nov 21 19:11:08 crc kubenswrapper[4701]: I1121 19:11:08.115542 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-868vc" podStartSLOduration=2.601551208 podStartE2EDuration="6.11551726s" podCreationTimestamp="2025-11-21 19:11:02 +0000 UTC" firstStartedPulling="2025-11-21 19:11:03.347768186 +0000 UTC m=+554.132908213" lastFinishedPulling="2025-11-21 19:11:06.861734208 +0000 UTC m=+557.646874265" observedRunningTime="2025-11-21 19:11:08.112035624 +0000 UTC m=+558.897175661" watchObservedRunningTime="2025-11-21 19:11:08.11551726 +0000 UTC m=+558.900657297" Nov 21 19:11:08 crc kubenswrapper[4701]: I1121 19:11:08.172161 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-z4fmm" podStartSLOduration=2.731640185 podStartE2EDuration="6.172131956s" podCreationTimestamp="2025-11-21 19:11:02 +0000 UTC" firstStartedPulling="2025-11-21 19:11:03.421458833 +0000 UTC m=+554.206598860" lastFinishedPulling="2025-11-21 19:11:06.861950564 +0000 UTC m=+557.647090631" observedRunningTime="2025-11-21 19:11:08.172096815 +0000 UTC m=+558.957236882" watchObservedRunningTime="2025-11-21 19:11:08.172131956 +0000 UTC m=+558.957271973" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.041472 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-z4fmm" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.156871 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-zzdxm"] Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.157515 4701 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="ovn-controller" containerID="cri-o://09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba" gracePeriod=30 Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.157693 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="sbdb" containerID="cri-o://86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69" gracePeriod=30 Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.157735 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="ovn-acl-logging" containerID="cri-o://409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948" gracePeriod=30 Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.157762 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685" gracePeriod=30 Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.157717 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="kube-rbac-proxy-node" containerID="cri-o://c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c" gracePeriod=30 Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.158010 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="northd" containerID="cri-o://fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531" gracePeriod=30 Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.157613 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="nbdb" containerID="cri-o://6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f" gracePeriod=30 Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.204622 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="ovnkube-controller" containerID="cri-o://b491b2a33b86d66bb70fc5ff2ad44c4876016d6c3ec1a9e1343f0d023a703867" gracePeriod=30 Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.489408 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zzdxm_cd6417be-62d7-4b6a-9711-a89211dca42e/ovnkube-controller/2.log" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.492859 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zzdxm_cd6417be-62d7-4b6a-9711-a89211dca42e/ovn-acl-logging/0.log" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.493617 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zzdxm_cd6417be-62d7-4b6a-9711-a89211dca42e/ovn-controller/0.log" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.495493 
4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.575950 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-hd7s8"] Nov 21 19:11:13 crc kubenswrapper[4701]: E1121 19:11:13.576333 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="kube-rbac-proxy-ovn-metrics" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.576355 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="kube-rbac-proxy-ovn-metrics" Nov 21 19:11:13 crc kubenswrapper[4701]: E1121 19:11:13.576376 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="ovn-controller" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.576388 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="ovn-controller" Nov 21 19:11:13 crc kubenswrapper[4701]: E1121 19:11:13.576413 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="kube-rbac-proxy-node" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.576428 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="kube-rbac-proxy-node" Nov 21 19:11:13 crc kubenswrapper[4701]: E1121 19:11:13.576452 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="sbdb" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.576466 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="sbdb" Nov 21 19:11:13 crc kubenswrapper[4701]: E1121 19:11:13.576485 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="ovnkube-controller" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.576503 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="ovnkube-controller" Nov 21 19:11:13 crc kubenswrapper[4701]: E1121 19:11:13.576538 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="northd" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.576551 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="northd" Nov 21 19:11:13 crc kubenswrapper[4701]: E1121 19:11:13.576565 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="ovnkube-controller" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.576578 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="ovnkube-controller" Nov 21 19:11:13 crc kubenswrapper[4701]: E1121 19:11:13.576594 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="ovnkube-controller" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.576606 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="ovnkube-controller" Nov 21 19:11:13 crc kubenswrapper[4701]: E1121 19:11:13.576619 4701 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="kubecfg-setup" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.576631 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="kubecfg-setup" Nov 21 19:11:13 crc kubenswrapper[4701]: E1121 19:11:13.576649 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="nbdb" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.576662 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="nbdb" Nov 21 19:11:13 crc kubenswrapper[4701]: E1121 19:11:13.576681 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="ovn-acl-logging" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.576693 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="ovn-acl-logging" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.576868 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="ovn-controller" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.576884 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="kube-rbac-proxy-ovn-metrics" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.576898 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="nbdb" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.576914 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="ovnkube-controller" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.576933 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="ovnkube-controller" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.576948 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="ovnkube-controller" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.576966 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="northd" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.576983 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="ovn-acl-logging" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.577000 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="kube-rbac-proxy-node" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.577018 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="sbdb" Nov 21 19:11:13 crc kubenswrapper[4701]: E1121 19:11:13.577237 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="ovnkube-controller" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.577253 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="ovnkube-controller" Nov 21 19:11:13 crc 
kubenswrapper[4701]: I1121 19:11:13.577423 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerName="ovnkube-controller" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.582007 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.609187 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/cd6417be-62d7-4b6a-9711-a89211dca42e-env-overrides\") pod \"cd6417be-62d7-4b6a-9711-a89211dca42e\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.609336 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-cni-bin\") pod \"cd6417be-62d7-4b6a-9711-a89211dca42e\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.609373 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-log-socket\") pod \"cd6417be-62d7-4b6a-9711-a89211dca42e\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.609411 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-node-log\") pod \"cd6417be-62d7-4b6a-9711-a89211dca42e\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.609455 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ccmf4\" (UniqueName: \"kubernetes.io/projected/cd6417be-62d7-4b6a-9711-a89211dca42e-kube-api-access-ccmf4\") pod \"cd6417be-62d7-4b6a-9711-a89211dca42e\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.609465 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "cd6417be-62d7-4b6a-9711-a89211dca42e" (UID: "cd6417be-62d7-4b6a-9711-a89211dca42e"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.609519 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/cd6417be-62d7-4b6a-9711-a89211dca42e-ovnkube-config\") pod \"cd6417be-62d7-4b6a-9711-a89211dca42e\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.609519 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-node-log" (OuterVolumeSpecName: "node-log") pod "cd6417be-62d7-4b6a-9711-a89211dca42e" (UID: "cd6417be-62d7-4b6a-9711-a89211dca42e"). InnerVolumeSpecName "node-log". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.609541 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-log-socket" (OuterVolumeSpecName: "log-socket") pod "cd6417be-62d7-4b6a-9711-a89211dca42e" (UID: "cd6417be-62d7-4b6a-9711-a89211dca42e"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.609614 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "cd6417be-62d7-4b6a-9711-a89211dca42e" (UID: "cd6417be-62d7-4b6a-9711-a89211dca42e"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.609574 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-run-openvswitch\") pod \"cd6417be-62d7-4b6a-9711-a89211dca42e\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.609689 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/cd6417be-62d7-4b6a-9711-a89211dca42e-ovn-node-metrics-cert\") pod \"cd6417be-62d7-4b6a-9711-a89211dca42e\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.609728 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-cni-netd\") pod \"cd6417be-62d7-4b6a-9711-a89211dca42e\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.609773 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-run-netns\") pod \"cd6417be-62d7-4b6a-9711-a89211dca42e\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.609807 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-etc-openvswitch\") pod \"cd6417be-62d7-4b6a-9711-a89211dca42e\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.609856 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-systemd-units\") pod \"cd6417be-62d7-4b6a-9711-a89211dca42e\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.609907 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-run-ovn-kubernetes\") pod \"cd6417be-62d7-4b6a-9711-a89211dca42e\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.609955 4701 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-var-lib-cni-networks-ovn-kubernetes\") pod \"cd6417be-62d7-4b6a-9711-a89211dca42e\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.609995 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-kubelet\") pod \"cd6417be-62d7-4b6a-9711-a89211dca42e\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.610029 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-run-systemd\") pod \"cd6417be-62d7-4b6a-9711-a89211dca42e\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.609937 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cd6417be-62d7-4b6a-9711-a89211dca42e-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "cd6417be-62d7-4b6a-9711-a89211dca42e" (UID: "cd6417be-62d7-4b6a-9711-a89211dca42e"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.610079 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "cd6417be-62d7-4b6a-9711-a89211dca42e" (UID: "cd6417be-62d7-4b6a-9711-a89211dca42e"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.610101 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "cd6417be-62d7-4b6a-9711-a89211dca42e" (UID: "cd6417be-62d7-4b6a-9711-a89211dca42e"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.609993 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "cd6417be-62d7-4b6a-9711-a89211dca42e" (UID: "cd6417be-62d7-4b6a-9711-a89211dca42e"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.610024 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "cd6417be-62d7-4b6a-9711-a89211dca42e" (UID: "cd6417be-62d7-4b6a-9711-a89211dca42e"). InnerVolumeSpecName "host-cni-netd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.610056 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "cd6417be-62d7-4b6a-9711-a89211dca42e" (UID: "cd6417be-62d7-4b6a-9711-a89211dca42e"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.610147 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "cd6417be-62d7-4b6a-9711-a89211dca42e" (UID: "cd6417be-62d7-4b6a-9711-a89211dca42e"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.610233 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "cd6417be-62d7-4b6a-9711-a89211dca42e" (UID: "cd6417be-62d7-4b6a-9711-a89211dca42e"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.610405 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-var-lib-openvswitch\") pod \"cd6417be-62d7-4b6a-9711-a89211dca42e\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.610498 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "cd6417be-62d7-4b6a-9711-a89211dca42e" (UID: "cd6417be-62d7-4b6a-9711-a89211dca42e"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.610668 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-run-ovn\") pod \"cd6417be-62d7-4b6a-9711-a89211dca42e\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.610674 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cd6417be-62d7-4b6a-9711-a89211dca42e-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "cd6417be-62d7-4b6a-9711-a89211dca42e" (UID: "cd6417be-62d7-4b6a-9711-a89211dca42e"). InnerVolumeSpecName "ovnkube-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.610747 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-slash\") pod \"cd6417be-62d7-4b6a-9711-a89211dca42e\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.610761 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "cd6417be-62d7-4b6a-9711-a89211dca42e" (UID: "cd6417be-62d7-4b6a-9711-a89211dca42e"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.610846 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-slash" (OuterVolumeSpecName: "host-slash") pod "cd6417be-62d7-4b6a-9711-a89211dca42e" (UID: "cd6417be-62d7-4b6a-9711-a89211dca42e"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.612061 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cd6417be-62d7-4b6a-9711-a89211dca42e-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "cd6417be-62d7-4b6a-9711-a89211dca42e" (UID: "cd6417be-62d7-4b6a-9711-a89211dca42e"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.612128 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/cd6417be-62d7-4b6a-9711-a89211dca42e-ovnkube-script-lib\") pod \"cd6417be-62d7-4b6a-9711-a89211dca42e\" (UID: \"cd6417be-62d7-4b6a-9711-a89211dca42e\") " Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.612452 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-run-ovn\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.612619 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v4hx7\" (UniqueName: \"kubernetes.io/projected/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-kube-api-access-v4hx7\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.612734 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-host-kubelet\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.612780 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-systemd-units\") pod 
\"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.612816 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-ovn-node-metrics-cert\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.612900 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-host-run-ovn-kubernetes\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.612986 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-host-cni-bin\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.613019 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-env-overrides\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.613134 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-run-openvswitch\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.613165 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-log-socket\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.613396 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-host-slash\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.613449 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-run-systemd\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.613500 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: 
\"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-host-run-netns\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.613609 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-var-lib-openvswitch\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.613655 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.613752 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-etc-openvswitch\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.613828 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-host-cni-netd\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.613873 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-ovnkube-config\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.613922 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-node-log\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.613978 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-ovnkube-script-lib\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.614092 4701 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-log-socket\") on node \"crc\" DevicePath \"\"" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.614115 4701 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: 
\"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-cni-bin\") on node \"crc\" DevicePath \"\"" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.614134 4701 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-node-log\") on node \"crc\" DevicePath \"\"" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.614154 4701 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/cd6417be-62d7-4b6a-9711-a89211dca42e-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.614175 4701 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-run-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.614191 4701 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-cni-netd\") on node \"crc\" DevicePath \"\"" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.614367 4701 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-run-netns\") on node \"crc\" DevicePath \"\"" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.614385 4701 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.614402 4701 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-systemd-units\") on node \"crc\" DevicePath \"\"" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.614422 4701 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.614440 4701 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.614458 4701 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-kubelet\") on node \"crc\" DevicePath \"\"" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.614477 4701 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.614494 4701 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-host-slash\") on node \"crc\" DevicePath \"\"" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.614509 4701 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: 
\"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.614526 4701 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/cd6417be-62d7-4b6a-9711-a89211dca42e-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.614546 4701 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/cd6417be-62d7-4b6a-9711-a89211dca42e-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.620250 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd6417be-62d7-4b6a-9711-a89211dca42e-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "cd6417be-62d7-4b6a-9711-a89211dca42e" (UID: "cd6417be-62d7-4b6a-9711-a89211dca42e"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.621226 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd6417be-62d7-4b6a-9711-a89211dca42e-kube-api-access-ccmf4" (OuterVolumeSpecName: "kube-api-access-ccmf4") pod "cd6417be-62d7-4b6a-9711-a89211dca42e" (UID: "cd6417be-62d7-4b6a-9711-a89211dca42e"). InnerVolumeSpecName "kube-api-access-ccmf4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.640325 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "cd6417be-62d7-4b6a-9711-a89211dca42e" (UID: "cd6417be-62d7-4b6a-9711-a89211dca42e"). InnerVolumeSpecName "run-systemd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.716124 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-ovnkube-script-lib\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.716213 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-run-ovn\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.716240 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v4hx7\" (UniqueName: \"kubernetes.io/projected/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-kube-api-access-v4hx7\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.716273 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-host-kubelet\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.716300 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-systemd-units\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.716319 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-ovn-node-metrics-cert\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.716339 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-host-run-ovn-kubernetes\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.716359 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-host-cni-bin\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.716383 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-env-overrides\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 
crc kubenswrapper[4701]: I1121 19:11:13.716405 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-run-openvswitch\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.716423 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-log-socket\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.716423 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-run-ovn\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.716514 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-host-run-ovn-kubernetes\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.716515 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-host-kubelet\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.716558 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-host-cni-bin\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.716445 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-host-slash\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.716596 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-log-socket\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.716593 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-run-openvswitch\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.716498 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: 
\"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-host-slash\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.716775 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-systemd-units\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.716796 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-run-systemd\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.716860 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-host-run-netns\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.716924 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-var-lib-openvswitch\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.716940 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-run-systemd\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.716967 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.717000 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-host-run-netns\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.717006 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-etc-openvswitch\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.717060 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-var-lib-openvswitch\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.717081 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-host-cni-netd\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.717126 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-ovnkube-config\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.717159 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-node-log\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.717164 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-etc-openvswitch\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.717286 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-env-overrides\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.717118 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.717314 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-host-cni-netd\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.717339 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-node-log\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.717418 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-ovnkube-script-lib\") pod \"ovnkube-node-hd7s8\" 
(UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.717509 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ccmf4\" (UniqueName: \"kubernetes.io/projected/cd6417be-62d7-4b6a-9711-a89211dca42e-kube-api-access-ccmf4\") on node \"crc\" DevicePath \"\"" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.717580 4701 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/cd6417be-62d7-4b6a-9711-a89211dca42e-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.717606 4701 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/cd6417be-62d7-4b6a-9711-a89211dca42e-run-systemd\") on node \"crc\" DevicePath \"\"" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.718327 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-ovnkube-config\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.722164 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-ovn-node-metrics-cert\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.736850 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v4hx7\" (UniqueName: \"kubernetes.io/projected/d21728f1-bdf4-494b-8c7f-cfb7ea809f46-kube-api-access-v4hx7\") pod \"ovnkube-node-hd7s8\" (UID: \"d21728f1-bdf4-494b-8c7f-cfb7ea809f46\") " pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:13 crc kubenswrapper[4701]: I1121 19:11:13.898475 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.118666 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" event={"ID":"d21728f1-bdf4-494b-8c7f-cfb7ea809f46","Type":"ContainerStarted","Data":"bfb5b9732b379068bb4a67397f510fbf46b5b74f48e5e77e65fa135fed74f7d3"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.121639 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zzdxm_cd6417be-62d7-4b6a-9711-a89211dca42e/ovnkube-controller/2.log" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.127182 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zzdxm_cd6417be-62d7-4b6a-9711-a89211dca42e/ovn-acl-logging/0.log" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.129173 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zzdxm_cd6417be-62d7-4b6a-9711-a89211dca42e/ovn-controller/0.log" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.129809 4701 generic.go:334] "Generic (PLEG): container finished" podID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerID="b491b2a33b86d66bb70fc5ff2ad44c4876016d6c3ec1a9e1343f0d023a703867" exitCode=0 Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.129872 4701 generic.go:334] "Generic (PLEG): container finished" podID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerID="86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69" exitCode=0 Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.129893 4701 generic.go:334] "Generic (PLEG): container finished" podID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerID="6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f" exitCode=0 Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.129891 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" event={"ID":"cd6417be-62d7-4b6a-9711-a89211dca42e","Type":"ContainerDied","Data":"b491b2a33b86d66bb70fc5ff2ad44c4876016d6c3ec1a9e1343f0d023a703867"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.129982 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" event={"ID":"cd6417be-62d7-4b6a-9711-a89211dca42e","Type":"ContainerDied","Data":"86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130000 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130016 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" event={"ID":"cd6417be-62d7-4b6a-9711-a89211dca42e","Type":"ContainerDied","Data":"6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.129912 4701 generic.go:334] "Generic (PLEG): container finished" podID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerID="fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531" exitCode=0 Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130054 4701 generic.go:334] "Generic (PLEG): container finished" podID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerID="c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685" exitCode=0 Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130073 4701 generic.go:334] "Generic (PLEG): container finished" podID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerID="c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c" exitCode=0 Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130089 4701 generic.go:334] "Generic (PLEG): container finished" podID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerID="409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948" exitCode=143 Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130108 4701 generic.go:334] "Generic (PLEG): container finished" podID="cd6417be-62d7-4b6a-9711-a89211dca42e" containerID="09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba" exitCode=143 Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130037 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" event={"ID":"cd6417be-62d7-4b6a-9711-a89211dca42e","Type":"ContainerDied","Data":"fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130167 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" event={"ID":"cd6417be-62d7-4b6a-9711-a89211dca42e","Type":"ContainerDied","Data":"c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130187 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" event={"ID":"cd6417be-62d7-4b6a-9711-a89211dca42e","Type":"ContainerDied","Data":"c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130222 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130244 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130254 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130261 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130269 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130277 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130284 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130292 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130300 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130058 4701 scope.go:117] "RemoveContainer" containerID="b491b2a33b86d66bb70fc5ff2ad44c4876016d6c3ec1a9e1343f0d023a703867" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130312 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" event={"ID":"cd6417be-62d7-4b6a-9711-a89211dca42e","Type":"ContainerDied","Data":"409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130408 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b491b2a33b86d66bb70fc5ff2ad44c4876016d6c3ec1a9e1343f0d023a703867"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130420 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130428 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130435 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130443 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130450 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130461 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130468 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130476 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130483 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130494 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" event={"ID":"cd6417be-62d7-4b6a-9711-a89211dca42e","Type":"ContainerDied","Data":"09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130506 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b491b2a33b86d66bb70fc5ff2ad44c4876016d6c3ec1a9e1343f0d023a703867"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130516 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130524 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130531 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130539 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130550 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130558 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130566 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130574 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130583 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130596 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zzdxm" event={"ID":"cd6417be-62d7-4b6a-9711-a89211dca42e","Type":"ContainerDied","Data":"476db94b54f79b4d4270d667eb8cb8eee3a7b807e90a12a98387e0eeb9e2310a"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130612 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b491b2a33b86d66bb70fc5ff2ad44c4876016d6c3ec1a9e1343f0d023a703867"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130625 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130633 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130641 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130648 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130655 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130662 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130669 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130676 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.130682 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.138408 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kf9jq_2eababf7-b5d3-4479-9ad5-f1060898f324/kube-multus/1.log" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.139321 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kf9jq_2eababf7-b5d3-4479-9ad5-f1060898f324/kube-multus/0.log" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.139382 4701 generic.go:334] "Generic (PLEG): container finished" podID="2eababf7-b5d3-4479-9ad5-f1060898f324" 
containerID="d836ae8a24de298a553266b2404ddcc460bc6aa64ccacb4f11b25fe6bf984464" exitCode=2 Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.139438 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-kf9jq" event={"ID":"2eababf7-b5d3-4479-9ad5-f1060898f324","Type":"ContainerDied","Data":"d836ae8a24de298a553266b2404ddcc460bc6aa64ccacb4f11b25fe6bf984464"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.139478 4701 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456"} Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.140236 4701 scope.go:117] "RemoveContainer" containerID="d836ae8a24de298a553266b2404ddcc460bc6aa64ccacb4f11b25fe6bf984464" Nov 21 19:11:14 crc kubenswrapper[4701]: E1121 19:11:14.140555 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-kf9jq_openshift-multus(2eababf7-b5d3-4479-9ad5-f1060898f324)\"" pod="openshift-multus/multus-kf9jq" podUID="2eababf7-b5d3-4479-9ad5-f1060898f324" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.170751 4701 scope.go:117] "RemoveContainer" containerID="ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.171441 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-zzdxm"] Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.182152 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-zzdxm"] Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.215501 4701 scope.go:117] "RemoveContainer" containerID="86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.231040 4701 scope.go:117] "RemoveContainer" containerID="6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.246876 4701 scope.go:117] "RemoveContainer" containerID="fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.261365 4701 scope.go:117] "RemoveContainer" containerID="c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.276104 4701 scope.go:117] "RemoveContainer" containerID="c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.294343 4701 scope.go:117] "RemoveContainer" containerID="409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.332950 4701 scope.go:117] "RemoveContainer" containerID="09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.355358 4701 scope.go:117] "RemoveContainer" containerID="3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.372553 4701 scope.go:117] "RemoveContainer" containerID="b491b2a33b86d66bb70fc5ff2ad44c4876016d6c3ec1a9e1343f0d023a703867" Nov 21 19:11:14 crc kubenswrapper[4701]: E1121 19:11:14.373305 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"b491b2a33b86d66bb70fc5ff2ad44c4876016d6c3ec1a9e1343f0d023a703867\": container with ID starting with b491b2a33b86d66bb70fc5ff2ad44c4876016d6c3ec1a9e1343f0d023a703867 not found: ID does not exist" containerID="b491b2a33b86d66bb70fc5ff2ad44c4876016d6c3ec1a9e1343f0d023a703867" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.373372 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b491b2a33b86d66bb70fc5ff2ad44c4876016d6c3ec1a9e1343f0d023a703867"} err="failed to get container status \"b491b2a33b86d66bb70fc5ff2ad44c4876016d6c3ec1a9e1343f0d023a703867\": rpc error: code = NotFound desc = could not find container \"b491b2a33b86d66bb70fc5ff2ad44c4876016d6c3ec1a9e1343f0d023a703867\": container with ID starting with b491b2a33b86d66bb70fc5ff2ad44c4876016d6c3ec1a9e1343f0d023a703867 not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.373416 4701 scope.go:117] "RemoveContainer" containerID="ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63" Nov 21 19:11:14 crc kubenswrapper[4701]: E1121 19:11:14.374002 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63\": container with ID starting with ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63 not found: ID does not exist" containerID="ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.374058 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63"} err="failed to get container status \"ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63\": rpc error: code = NotFound desc = could not find container \"ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63\": container with ID starting with ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63 not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.374101 4701 scope.go:117] "RemoveContainer" containerID="86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69" Nov 21 19:11:14 crc kubenswrapper[4701]: E1121 19:11:14.374619 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69\": container with ID starting with 86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69 not found: ID does not exist" containerID="86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.374768 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69"} err="failed to get container status \"86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69\": rpc error: code = NotFound desc = could not find container \"86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69\": container with ID starting with 86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69 not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.374789 4701 scope.go:117] "RemoveContainer" containerID="6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f" Nov 21 19:11:14 crc 
kubenswrapper[4701]: E1121 19:11:14.375142 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f\": container with ID starting with 6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f not found: ID does not exist" containerID="6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.375172 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f"} err="failed to get container status \"6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f\": rpc error: code = NotFound desc = could not find container \"6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f\": container with ID starting with 6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.375193 4701 scope.go:117] "RemoveContainer" containerID="fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531" Nov 21 19:11:14 crc kubenswrapper[4701]: E1121 19:11:14.375662 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531\": container with ID starting with fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531 not found: ID does not exist" containerID="fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.375697 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531"} err="failed to get container status \"fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531\": rpc error: code = NotFound desc = could not find container \"fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531\": container with ID starting with fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531 not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.375717 4701 scope.go:117] "RemoveContainer" containerID="c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685" Nov 21 19:11:14 crc kubenswrapper[4701]: E1121 19:11:14.376250 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685\": container with ID starting with c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685 not found: ID does not exist" containerID="c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.376308 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685"} err="failed to get container status \"c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685\": rpc error: code = NotFound desc = could not find container \"c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685\": container with ID starting with c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685 not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: 
I1121 19:11:14.376363 4701 scope.go:117] "RemoveContainer" containerID="c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c" Nov 21 19:11:14 crc kubenswrapper[4701]: E1121 19:11:14.376898 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c\": container with ID starting with c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c not found: ID does not exist" containerID="c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.376943 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c"} err="failed to get container status \"c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c\": rpc error: code = NotFound desc = could not find container \"c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c\": container with ID starting with c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.376969 4701 scope.go:117] "RemoveContainer" containerID="409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948" Nov 21 19:11:14 crc kubenswrapper[4701]: E1121 19:11:14.377432 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948\": container with ID starting with 409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948 not found: ID does not exist" containerID="409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.377507 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948"} err="failed to get container status \"409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948\": rpc error: code = NotFound desc = could not find container \"409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948\": container with ID starting with 409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948 not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.377541 4701 scope.go:117] "RemoveContainer" containerID="09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba" Nov 21 19:11:14 crc kubenswrapper[4701]: E1121 19:11:14.377936 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba\": container with ID starting with 09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba not found: ID does not exist" containerID="09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.377968 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba"} err="failed to get container status \"09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba\": rpc error: code = NotFound desc = could not find container \"09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba\": container 
with ID starting with 09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.377985 4701 scope.go:117] "RemoveContainer" containerID="3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9" Nov 21 19:11:14 crc kubenswrapper[4701]: E1121 19:11:14.378399 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\": container with ID starting with 3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9 not found: ID does not exist" containerID="3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.378453 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9"} err="failed to get container status \"3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\": rpc error: code = NotFound desc = could not find container \"3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\": container with ID starting with 3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9 not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.378485 4701 scope.go:117] "RemoveContainer" containerID="b491b2a33b86d66bb70fc5ff2ad44c4876016d6c3ec1a9e1343f0d023a703867" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.378936 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b491b2a33b86d66bb70fc5ff2ad44c4876016d6c3ec1a9e1343f0d023a703867"} err="failed to get container status \"b491b2a33b86d66bb70fc5ff2ad44c4876016d6c3ec1a9e1343f0d023a703867\": rpc error: code = NotFound desc = could not find container \"b491b2a33b86d66bb70fc5ff2ad44c4876016d6c3ec1a9e1343f0d023a703867\": container with ID starting with b491b2a33b86d66bb70fc5ff2ad44c4876016d6c3ec1a9e1343f0d023a703867 not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.378999 4701 scope.go:117] "RemoveContainer" containerID="ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.379638 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63"} err="failed to get container status \"ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63\": rpc error: code = NotFound desc = could not find container \"ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63\": container with ID starting with ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63 not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.379672 4701 scope.go:117] "RemoveContainer" containerID="86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.379996 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69"} err="failed to get container status \"86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69\": rpc error: code = NotFound desc = could not find container \"86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69\": container 
with ID starting with 86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69 not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.380041 4701 scope.go:117] "RemoveContainer" containerID="6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.380491 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f"} err="failed to get container status \"6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f\": rpc error: code = NotFound desc = could not find container \"6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f\": container with ID starting with 6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.380518 4701 scope.go:117] "RemoveContainer" containerID="fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.380832 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531"} err="failed to get container status \"fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531\": rpc error: code = NotFound desc = could not find container \"fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531\": container with ID starting with fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531 not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.380863 4701 scope.go:117] "RemoveContainer" containerID="c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.381267 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685"} err="failed to get container status \"c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685\": rpc error: code = NotFound desc = could not find container \"c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685\": container with ID starting with c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685 not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.381295 4701 scope.go:117] "RemoveContainer" containerID="c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.381616 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c"} err="failed to get container status \"c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c\": rpc error: code = NotFound desc = could not find container \"c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c\": container with ID starting with c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.381660 4701 scope.go:117] "RemoveContainer" containerID="409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.382014 4701 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948"} err="failed to get container status \"409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948\": rpc error: code = NotFound desc = could not find container \"409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948\": container with ID starting with 409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948 not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.382039 4701 scope.go:117] "RemoveContainer" containerID="09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.382383 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba"} err="failed to get container status \"09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba\": rpc error: code = NotFound desc = could not find container \"09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba\": container with ID starting with 09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.382407 4701 scope.go:117] "RemoveContainer" containerID="3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.382855 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9"} err="failed to get container status \"3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\": rpc error: code = NotFound desc = could not find container \"3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\": container with ID starting with 3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9 not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.382896 4701 scope.go:117] "RemoveContainer" containerID="b491b2a33b86d66bb70fc5ff2ad44c4876016d6c3ec1a9e1343f0d023a703867" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.383395 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b491b2a33b86d66bb70fc5ff2ad44c4876016d6c3ec1a9e1343f0d023a703867"} err="failed to get container status \"b491b2a33b86d66bb70fc5ff2ad44c4876016d6c3ec1a9e1343f0d023a703867\": rpc error: code = NotFound desc = could not find container \"b491b2a33b86d66bb70fc5ff2ad44c4876016d6c3ec1a9e1343f0d023a703867\": container with ID starting with b491b2a33b86d66bb70fc5ff2ad44c4876016d6c3ec1a9e1343f0d023a703867 not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.383422 4701 scope.go:117] "RemoveContainer" containerID="ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.383817 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63"} err="failed to get container status \"ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63\": rpc error: code = NotFound desc = could not find container \"ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63\": container with ID starting with ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63 not found: ID does not exist" Nov 
21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.383855 4701 scope.go:117] "RemoveContainer" containerID="86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.384286 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69"} err="failed to get container status \"86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69\": rpc error: code = NotFound desc = could not find container \"86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69\": container with ID starting with 86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69 not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.384314 4701 scope.go:117] "RemoveContainer" containerID="6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.384640 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f"} err="failed to get container status \"6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f\": rpc error: code = NotFound desc = could not find container \"6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f\": container with ID starting with 6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.384687 4701 scope.go:117] "RemoveContainer" containerID="fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.385044 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531"} err="failed to get container status \"fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531\": rpc error: code = NotFound desc = could not find container \"fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531\": container with ID starting with fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531 not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.385107 4701 scope.go:117] "RemoveContainer" containerID="c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.385487 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685"} err="failed to get container status \"c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685\": rpc error: code = NotFound desc = could not find container \"c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685\": container with ID starting with c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685 not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.385512 4701 scope.go:117] "RemoveContainer" containerID="c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.388163 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c"} err="failed to get container status 
\"c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c\": rpc error: code = NotFound desc = could not find container \"c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c\": container with ID starting with c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.388239 4701 scope.go:117] "RemoveContainer" containerID="409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.388592 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948"} err="failed to get container status \"409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948\": rpc error: code = NotFound desc = could not find container \"409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948\": container with ID starting with 409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948 not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.388619 4701 scope.go:117] "RemoveContainer" containerID="09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.389083 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba"} err="failed to get container status \"09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba\": rpc error: code = NotFound desc = could not find container \"09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba\": container with ID starting with 09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.389124 4701 scope.go:117] "RemoveContainer" containerID="3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.389577 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9"} err="failed to get container status \"3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\": rpc error: code = NotFound desc = could not find container \"3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\": container with ID starting with 3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9 not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.389613 4701 scope.go:117] "RemoveContainer" containerID="b491b2a33b86d66bb70fc5ff2ad44c4876016d6c3ec1a9e1343f0d023a703867" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.389980 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b491b2a33b86d66bb70fc5ff2ad44c4876016d6c3ec1a9e1343f0d023a703867"} err="failed to get container status \"b491b2a33b86d66bb70fc5ff2ad44c4876016d6c3ec1a9e1343f0d023a703867\": rpc error: code = NotFound desc = could not find container \"b491b2a33b86d66bb70fc5ff2ad44c4876016d6c3ec1a9e1343f0d023a703867\": container with ID starting with b491b2a33b86d66bb70fc5ff2ad44c4876016d6c3ec1a9e1343f0d023a703867 not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.390010 4701 scope.go:117] "RemoveContainer" 
containerID="ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.390362 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63"} err="failed to get container status \"ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63\": rpc error: code = NotFound desc = could not find container \"ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63\": container with ID starting with ad576a4992adab10688e96fc25b505ad62fcb76a61f49f2c980efbd229127d63 not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.390414 4701 scope.go:117] "RemoveContainer" containerID="86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.390775 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69"} err="failed to get container status \"86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69\": rpc error: code = NotFound desc = could not find container \"86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69\": container with ID starting with 86f401a23cc229e3d89e6db7727ec0f14889955621a9a08ab07bf6d01109de69 not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.390807 4701 scope.go:117] "RemoveContainer" containerID="6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.391163 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f"} err="failed to get container status \"6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f\": rpc error: code = NotFound desc = could not find container \"6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f\": container with ID starting with 6f5d71cc1318f6df78b4867aa60f8c6a846bd6e3653f196c236d38c1e4cb434f not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.391226 4701 scope.go:117] "RemoveContainer" containerID="fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.391622 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531"} err="failed to get container status \"fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531\": rpc error: code = NotFound desc = could not find container \"fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531\": container with ID starting with fd312fbf4848f167c7bce38f6544dcdbd1b71af477008bc2420a58bd6180b531 not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.391649 4701 scope.go:117] "RemoveContainer" containerID="c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.391977 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685"} err="failed to get container status \"c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685\": rpc error: code = NotFound desc = could not find 
container \"c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685\": container with ID starting with c7b100ce8d9e34d2fab2e44b45d663c023ff09b81d33d6c5d93dd30268a4f685 not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.392014 4701 scope.go:117] "RemoveContainer" containerID="c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.392355 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c"} err="failed to get container status \"c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c\": rpc error: code = NotFound desc = could not find container \"c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c\": container with ID starting with c6bbccfba4788306dff91a5286d1d58aceb73319863ed71480195bef99db9a1c not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.392380 4701 scope.go:117] "RemoveContainer" containerID="409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.392630 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948"} err="failed to get container status \"409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948\": rpc error: code = NotFound desc = could not find container \"409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948\": container with ID starting with 409c432710d173b9ce0d136a4d47b86f71928746c64671ef5995a3d87a9ec948 not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.392659 4701 scope.go:117] "RemoveContainer" containerID="09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.392926 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba"} err="failed to get container status \"09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba\": rpc error: code = NotFound desc = could not find container \"09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba\": container with ID starting with 09ffb01f9feee047ea7defb22a251638bcddf70f7c69f6d04088f39d77b66bba not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.392950 4701 scope.go:117] "RemoveContainer" containerID="3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.393236 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9"} err="failed to get container status \"3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\": rpc error: code = NotFound desc = could not find container \"3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9\": container with ID starting with 3e59aba7a90dfee47b3dacc3d12ff18640c768039a4b87a0c4aab45573abfbf9 not found: ID does not exist" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.393256 4701 scope.go:117] "RemoveContainer" containerID="b491b2a33b86d66bb70fc5ff2ad44c4876016d6c3ec1a9e1343f0d023a703867" Nov 21 19:11:14 crc kubenswrapper[4701]: I1121 19:11:14.393510 4701 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b491b2a33b86d66bb70fc5ff2ad44c4876016d6c3ec1a9e1343f0d023a703867"} err="failed to get container status \"b491b2a33b86d66bb70fc5ff2ad44c4876016d6c3ec1a9e1343f0d023a703867\": rpc error: code = NotFound desc = could not find container \"b491b2a33b86d66bb70fc5ff2ad44c4876016d6c3ec1a9e1343f0d023a703867\": container with ID starting with b491b2a33b86d66bb70fc5ff2ad44c4876016d6c3ec1a9e1343f0d023a703867 not found: ID does not exist" Nov 21 19:11:15 crc kubenswrapper[4701]: I1121 19:11:15.152570 4701 generic.go:334] "Generic (PLEG): container finished" podID="d21728f1-bdf4-494b-8c7f-cfb7ea809f46" containerID="3c195993cf7dd303192691c271934d260c41c000d570d65dff46ddb5af9435ef" exitCode=0 Nov 21 19:11:15 crc kubenswrapper[4701]: I1121 19:11:15.152698 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" event={"ID":"d21728f1-bdf4-494b-8c7f-cfb7ea809f46","Type":"ContainerDied","Data":"3c195993cf7dd303192691c271934d260c41c000d570d65dff46ddb5af9435ef"} Nov 21 19:11:15 crc kubenswrapper[4701]: I1121 19:11:15.981478 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd6417be-62d7-4b6a-9711-a89211dca42e" path="/var/lib/kubelet/pods/cd6417be-62d7-4b6a-9711-a89211dca42e/volumes" Nov 21 19:11:16 crc kubenswrapper[4701]: I1121 19:11:16.165448 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" event={"ID":"d21728f1-bdf4-494b-8c7f-cfb7ea809f46","Type":"ContainerStarted","Data":"9836e7d5d7ae45dc3d978b4b6224297a7ffdbd037fee847a89ae73fbf9a947d9"} Nov 21 19:11:16 crc kubenswrapper[4701]: I1121 19:11:16.165520 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" event={"ID":"d21728f1-bdf4-494b-8c7f-cfb7ea809f46","Type":"ContainerStarted","Data":"d99fb03efcb46be4216084f042bf46b3f673f3132b595fe25eb1ba52c5138efd"} Nov 21 19:11:16 crc kubenswrapper[4701]: I1121 19:11:16.165533 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" event={"ID":"d21728f1-bdf4-494b-8c7f-cfb7ea809f46","Type":"ContainerStarted","Data":"ecdba1c3ceaecc627870d42a32e2e603887b76ef62979b68845a572dde88f2a3"} Nov 21 19:11:16 crc kubenswrapper[4701]: I1121 19:11:16.165545 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" event={"ID":"d21728f1-bdf4-494b-8c7f-cfb7ea809f46","Type":"ContainerStarted","Data":"1d670f126e851529c9f18d29387e67267fab1c83300cdea0c4a4af16929f2601"} Nov 21 19:11:16 crc kubenswrapper[4701]: I1121 19:11:16.165556 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" event={"ID":"d21728f1-bdf4-494b-8c7f-cfb7ea809f46","Type":"ContainerStarted","Data":"68a9d8a5d8774c92415d3d708768bc34f32ed8d6b41a148035bc7193d7dda60c"} Nov 21 19:11:17 crc kubenswrapper[4701]: I1121 19:11:17.180278 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" event={"ID":"d21728f1-bdf4-494b-8c7f-cfb7ea809f46","Type":"ContainerStarted","Data":"1da8d0e76f9fc49b2e28a138034eb5ed43fa029d6153d22b490c80d2dced36c5"} Nov 21 19:11:18 crc kubenswrapper[4701]: I1121 19:11:18.614233 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": 
dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 19:11:18 crc kubenswrapper[4701]: I1121 19:11:18.614857 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 19:11:18 crc kubenswrapper[4701]: I1121 19:11:18.614939 4701 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" Nov 21 19:11:18 crc kubenswrapper[4701]: I1121 19:11:18.615994 4701 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"af4d914cc7c263c798f4370559a31981e2c52301881b123a393037c80c3da1f8"} pod="openshift-machine-config-operator/machine-config-daemon-tbszf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 19:11:18 crc kubenswrapper[4701]: I1121 19:11:18.616106 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" containerID="cri-o://af4d914cc7c263c798f4370559a31981e2c52301881b123a393037c80c3da1f8" gracePeriod=600 Nov 21 19:11:19 crc kubenswrapper[4701]: I1121 19:11:19.199641 4701 generic.go:334] "Generic (PLEG): container finished" podID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerID="af4d914cc7c263c798f4370559a31981e2c52301881b123a393037c80c3da1f8" exitCode=0 Nov 21 19:11:19 crc kubenswrapper[4701]: I1121 19:11:19.199760 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerDied","Data":"af4d914cc7c263c798f4370559a31981e2c52301881b123a393037c80c3da1f8"} Nov 21 19:11:19 crc kubenswrapper[4701]: I1121 19:11:19.199961 4701 scope.go:117] "RemoveContainer" containerID="4588b2a736d568e8f69ecdacc0ee6977f154eb82e175accdf7d81cf19a181fd6" Nov 21 19:11:19 crc kubenswrapper[4701]: I1121 19:11:19.200363 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerStarted","Data":"c8758f6e0ff69b0e680f67ce66823ba447806821fa55aca9dc22f0075d6645fd"} Nov 21 19:11:19 crc kubenswrapper[4701]: I1121 19:11:19.208212 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" event={"ID":"d21728f1-bdf4-494b-8c7f-cfb7ea809f46","Type":"ContainerStarted","Data":"d842bd51016161864728814a156f4a4087251bd28288eaf96f361fc1858be6fa"} Nov 21 19:11:21 crc kubenswrapper[4701]: I1121 19:11:21.428043 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" event={"ID":"d21728f1-bdf4-494b-8c7f-cfb7ea809f46","Type":"ContainerStarted","Data":"fdce0f4a61cf40820aa41f823705754abe86a6dd22927c51c52301ec099daba7"} Nov 21 19:11:21 crc kubenswrapper[4701]: I1121 19:11:21.428779 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:21 crc kubenswrapper[4701]: I1121 19:11:21.483700 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:21 crc kubenswrapper[4701]: I1121 19:11:21.539109 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" podStartSLOduration=8.539079163 podStartE2EDuration="8.539079163s" podCreationTimestamp="2025-11-21 19:11:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:11:21.468586614 +0000 UTC m=+572.253726661" watchObservedRunningTime="2025-11-21 19:11:21.539079163 +0000 UTC m=+572.324219200" Nov 21 19:11:22 crc kubenswrapper[4701]: I1121 19:11:22.437707 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:22 crc kubenswrapper[4701]: I1121 19:11:22.437805 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:22 crc kubenswrapper[4701]: I1121 19:11:22.500362 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:28 crc kubenswrapper[4701]: I1121 19:11:28.951273 4701 scope.go:117] "RemoveContainer" containerID="d836ae8a24de298a553266b2404ddcc460bc6aa64ccacb4f11b25fe6bf984464" Nov 21 19:11:30 crc kubenswrapper[4701]: I1121 19:11:30.509370 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kf9jq_2eababf7-b5d3-4479-9ad5-f1060898f324/kube-multus/1.log" Nov 21 19:11:30 crc kubenswrapper[4701]: I1121 19:11:30.510611 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kf9jq_2eababf7-b5d3-4479-9ad5-f1060898f324/kube-multus/0.log" Nov 21 19:11:30 crc kubenswrapper[4701]: I1121 19:11:30.510698 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-kf9jq" event={"ID":"2eababf7-b5d3-4479-9ad5-f1060898f324","Type":"ContainerStarted","Data":"3d5e17e96498fb567d8265d1c396ca0d7ebf064b360f589369abe7c82dcba2a0"} Nov 21 19:11:43 crc kubenswrapper[4701]: I1121 19:11:43.935533 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-hd7s8" Nov 21 19:11:44 crc kubenswrapper[4701]: I1121 19:11:44.173150 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl"] Nov 21 19:11:44 crc kubenswrapper[4701]: I1121 19:11:44.174712 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl" Nov 21 19:11:44 crc kubenswrapper[4701]: I1121 19:11:44.177586 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 21 19:11:44 crc kubenswrapper[4701]: I1121 19:11:44.200977 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl"] Nov 21 19:11:44 crc kubenswrapper[4701]: I1121 19:11:44.278978 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-stxvb\" (UniqueName: \"kubernetes.io/projected/57477ab7-1bf6-486a-ae7a-98cf3e893869-kube-api-access-stxvb\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl\" (UID: \"57477ab7-1bf6-486a-ae7a-98cf3e893869\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl" Nov 21 19:11:44 crc kubenswrapper[4701]: I1121 19:11:44.279337 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/57477ab7-1bf6-486a-ae7a-98cf3e893869-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl\" (UID: \"57477ab7-1bf6-486a-ae7a-98cf3e893869\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl" Nov 21 19:11:44 crc kubenswrapper[4701]: I1121 19:11:44.279609 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/57477ab7-1bf6-486a-ae7a-98cf3e893869-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl\" (UID: \"57477ab7-1bf6-486a-ae7a-98cf3e893869\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl" Nov 21 19:11:44 crc kubenswrapper[4701]: I1121 19:11:44.380794 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/57477ab7-1bf6-486a-ae7a-98cf3e893869-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl\" (UID: \"57477ab7-1bf6-486a-ae7a-98cf3e893869\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl" Nov 21 19:11:44 crc kubenswrapper[4701]: I1121 19:11:44.380929 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-stxvb\" (UniqueName: \"kubernetes.io/projected/57477ab7-1bf6-486a-ae7a-98cf3e893869-kube-api-access-stxvb\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl\" (UID: \"57477ab7-1bf6-486a-ae7a-98cf3e893869\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl" Nov 21 19:11:44 crc kubenswrapper[4701]: I1121 19:11:44.380996 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/57477ab7-1bf6-486a-ae7a-98cf3e893869-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl\" (UID: \"57477ab7-1bf6-486a-ae7a-98cf3e893869\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl" Nov 21 19:11:44 crc kubenswrapper[4701]: I1121 19:11:44.382545 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/57477ab7-1bf6-486a-ae7a-98cf3e893869-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl\" (UID: \"57477ab7-1bf6-486a-ae7a-98cf3e893869\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl" Nov 21 19:11:44 crc kubenswrapper[4701]: I1121 19:11:44.382572 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/57477ab7-1bf6-486a-ae7a-98cf3e893869-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl\" (UID: \"57477ab7-1bf6-486a-ae7a-98cf3e893869\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl" Nov 21 19:11:44 crc kubenswrapper[4701]: I1121 19:11:44.418109 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-stxvb\" (UniqueName: \"kubernetes.io/projected/57477ab7-1bf6-486a-ae7a-98cf3e893869-kube-api-access-stxvb\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl\" (UID: \"57477ab7-1bf6-486a-ae7a-98cf3e893869\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl" Nov 21 19:11:44 crc kubenswrapper[4701]: I1121 19:11:44.502981 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl" Nov 21 19:11:44 crc kubenswrapper[4701]: I1121 19:11:44.817486 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl"] Nov 21 19:11:44 crc kubenswrapper[4701]: W1121 19:11:44.823226 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod57477ab7_1bf6_486a_ae7a_98cf3e893869.slice/crio-e62bf3879263b3078a88bb2affbc8cf72316eab0f560ba9ca042247e7e14bf90 WatchSource:0}: Error finding container e62bf3879263b3078a88bb2affbc8cf72316eab0f560ba9ca042247e7e14bf90: Status 404 returned error can't find the container with id e62bf3879263b3078a88bb2affbc8cf72316eab0f560ba9ca042247e7e14bf90 Nov 21 19:11:45 crc kubenswrapper[4701]: I1121 19:11:45.630170 4701 generic.go:334] "Generic (PLEG): container finished" podID="57477ab7-1bf6-486a-ae7a-98cf3e893869" containerID="e3a61219e152d92a2a1d06b38c652fba319042b5f5841d3316e53b3b741e1fd0" exitCode=0 Nov 21 19:11:45 crc kubenswrapper[4701]: I1121 19:11:45.630360 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl" event={"ID":"57477ab7-1bf6-486a-ae7a-98cf3e893869","Type":"ContainerDied","Data":"e3a61219e152d92a2a1d06b38c652fba319042b5f5841d3316e53b3b741e1fd0"} Nov 21 19:11:45 crc kubenswrapper[4701]: I1121 19:11:45.630726 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl" event={"ID":"57477ab7-1bf6-486a-ae7a-98cf3e893869","Type":"ContainerStarted","Data":"e62bf3879263b3078a88bb2affbc8cf72316eab0f560ba9ca042247e7e14bf90"} Nov 21 19:11:47 crc kubenswrapper[4701]: I1121 19:11:47.650445 4701 generic.go:334] "Generic (PLEG): container finished" podID="57477ab7-1bf6-486a-ae7a-98cf3e893869" containerID="dee77b692a7bdacc86a847650a0efee1dbbd50e3efb86dba07ebd03a50724c7a" exitCode=0 Nov 21 19:11:47 crc kubenswrapper[4701]: I1121 19:11:47.650538 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl" event={"ID":"57477ab7-1bf6-486a-ae7a-98cf3e893869","Type":"ContainerDied","Data":"dee77b692a7bdacc86a847650a0efee1dbbd50e3efb86dba07ebd03a50724c7a"} Nov 21 19:11:48 crc kubenswrapper[4701]: I1121 19:11:48.664118 4701 generic.go:334] "Generic (PLEG): container finished" podID="57477ab7-1bf6-486a-ae7a-98cf3e893869" containerID="76590db10a7723229eecf32cb0d4c87af0c1f3cc721693a1de00824c4fda3e70" exitCode=0 Nov 21 19:11:48 crc kubenswrapper[4701]: I1121 19:11:48.664263 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl" event={"ID":"57477ab7-1bf6-486a-ae7a-98cf3e893869","Type":"ContainerDied","Data":"76590db10a7723229eecf32cb0d4c87af0c1f3cc721693a1de00824c4fda3e70"} Nov 21 19:11:50 crc kubenswrapper[4701]: I1121 19:11:50.053405 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl" Nov 21 19:11:50 crc kubenswrapper[4701]: I1121 19:11:50.212222 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-stxvb\" (UniqueName: \"kubernetes.io/projected/57477ab7-1bf6-486a-ae7a-98cf3e893869-kube-api-access-stxvb\") pod \"57477ab7-1bf6-486a-ae7a-98cf3e893869\" (UID: \"57477ab7-1bf6-486a-ae7a-98cf3e893869\") " Nov 21 19:11:50 crc kubenswrapper[4701]: I1121 19:11:50.212298 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/57477ab7-1bf6-486a-ae7a-98cf3e893869-bundle\") pod \"57477ab7-1bf6-486a-ae7a-98cf3e893869\" (UID: \"57477ab7-1bf6-486a-ae7a-98cf3e893869\") " Nov 21 19:11:50 crc kubenswrapper[4701]: I1121 19:11:50.212357 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/57477ab7-1bf6-486a-ae7a-98cf3e893869-util\") pod \"57477ab7-1bf6-486a-ae7a-98cf3e893869\" (UID: \"57477ab7-1bf6-486a-ae7a-98cf3e893869\") " Nov 21 19:11:50 crc kubenswrapper[4701]: I1121 19:11:50.214729 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57477ab7-1bf6-486a-ae7a-98cf3e893869-bundle" (OuterVolumeSpecName: "bundle") pod "57477ab7-1bf6-486a-ae7a-98cf3e893869" (UID: "57477ab7-1bf6-486a-ae7a-98cf3e893869"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:11:50 crc kubenswrapper[4701]: I1121 19:11:50.217842 4701 scope.go:117] "RemoveContainer" containerID="afd598ed08752341f70fd16a4c848cef3ecb0654dbdecd05c970129ef585a456" Nov 21 19:11:50 crc kubenswrapper[4701]: I1121 19:11:50.226951 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57477ab7-1bf6-486a-ae7a-98cf3e893869-util" (OuterVolumeSpecName: "util") pod "57477ab7-1bf6-486a-ae7a-98cf3e893869" (UID: "57477ab7-1bf6-486a-ae7a-98cf3e893869"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:11:50 crc kubenswrapper[4701]: I1121 19:11:50.234380 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57477ab7-1bf6-486a-ae7a-98cf3e893869-kube-api-access-stxvb" (OuterVolumeSpecName: "kube-api-access-stxvb") pod "57477ab7-1bf6-486a-ae7a-98cf3e893869" (UID: "57477ab7-1bf6-486a-ae7a-98cf3e893869"). InnerVolumeSpecName "kube-api-access-stxvb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:11:50 crc kubenswrapper[4701]: I1121 19:11:50.314605 4701 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/57477ab7-1bf6-486a-ae7a-98cf3e893869-util\") on node \"crc\" DevicePath \"\"" Nov 21 19:11:50 crc kubenswrapper[4701]: I1121 19:11:50.314676 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-stxvb\" (UniqueName: \"kubernetes.io/projected/57477ab7-1bf6-486a-ae7a-98cf3e893869-kube-api-access-stxvb\") on node \"crc\" DevicePath \"\"" Nov 21 19:11:50 crc kubenswrapper[4701]: I1121 19:11:50.314708 4701 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/57477ab7-1bf6-486a-ae7a-98cf3e893869-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:11:50 crc kubenswrapper[4701]: I1121 19:11:50.681871 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kf9jq_2eababf7-b5d3-4479-9ad5-f1060898f324/kube-multus/1.log" Nov 21 19:11:50 crc kubenswrapper[4701]: I1121 19:11:50.684496 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl" event={"ID":"57477ab7-1bf6-486a-ae7a-98cf3e893869","Type":"ContainerDied","Data":"e62bf3879263b3078a88bb2affbc8cf72316eab0f560ba9ca042247e7e14bf90"} Nov 21 19:11:50 crc kubenswrapper[4701]: I1121 19:11:50.684547 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e62bf3879263b3078a88bb2affbc8cf72316eab0f560ba9ca042247e7e14bf90" Nov 21 19:11:50 crc kubenswrapper[4701]: I1121 19:11:50.684665 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.361163 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-2rtjt"] Nov 21 19:12:02 crc kubenswrapper[4701]: E1121 19:12:02.361913 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57477ab7-1bf6-486a-ae7a-98cf3e893869" containerName="util" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.361925 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="57477ab7-1bf6-486a-ae7a-98cf3e893869" containerName="util" Nov 21 19:12:02 crc kubenswrapper[4701]: E1121 19:12:02.361946 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57477ab7-1bf6-486a-ae7a-98cf3e893869" containerName="pull" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.361953 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="57477ab7-1bf6-486a-ae7a-98cf3e893869" containerName="pull" Nov 21 19:12:02 crc kubenswrapper[4701]: E1121 19:12:02.361967 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57477ab7-1bf6-486a-ae7a-98cf3e893869" containerName="extract" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.361973 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="57477ab7-1bf6-486a-ae7a-98cf3e893869" containerName="extract" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.362067 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="57477ab7-1bf6-486a-ae7a-98cf3e893869" containerName="extract" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.362475 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-2rtjt" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.364634 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-tjrvx" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.365433 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.365662 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.384185 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-2rtjt"] Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.502011 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7f5ddff8fb-fjw5g"] Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.502731 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7f5ddff8fb-fjw5g" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.506010 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-qd2p6" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.506312 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.512425 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7f5ddff8fb-rv4tn"] Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.513548 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7f5ddff8fb-rv4tn" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.518957 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7stmb\" (UniqueName: \"kubernetes.io/projected/de3cafdb-cdad-4b38-a867-fd0e88551dc7-kube-api-access-7stmb\") pod \"obo-prometheus-operator-668cf9dfbb-2rtjt\" (UID: \"de3cafdb-cdad-4b38-a867-fd0e88551dc7\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-2rtjt" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.530178 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7f5ddff8fb-fjw5g"] Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.537172 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7f5ddff8fb-rv4tn"] Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.620161 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/11625333-ca33-46bb-9856-a0390b6283bf-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7f5ddff8fb-rv4tn\" (UID: \"11625333-ca33-46bb-9856-a0390b6283bf\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7f5ddff8fb-rv4tn" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.620251 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7stmb\" (UniqueName: \"kubernetes.io/projected/de3cafdb-cdad-4b38-a867-fd0e88551dc7-kube-api-access-7stmb\") pod \"obo-prometheus-operator-668cf9dfbb-2rtjt\" (UID: \"de3cafdb-cdad-4b38-a867-fd0e88551dc7\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-2rtjt" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.620299 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/91073027-6c2c-4cbf-af6d-bd763b073a0b-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7f5ddff8fb-fjw5g\" (UID: \"91073027-6c2c-4cbf-af6d-bd763b073a0b\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7f5ddff8fb-fjw5g" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.620325 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/91073027-6c2c-4cbf-af6d-bd763b073a0b-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7f5ddff8fb-fjw5g\" (UID: \"91073027-6c2c-4cbf-af6d-bd763b073a0b\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7f5ddff8fb-fjw5g" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.621082 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/11625333-ca33-46bb-9856-a0390b6283bf-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7f5ddff8fb-rv4tn\" (UID: \"11625333-ca33-46bb-9856-a0390b6283bf\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7f5ddff8fb-rv4tn" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.638448 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7stmb\" (UniqueName: 
\"kubernetes.io/projected/de3cafdb-cdad-4b38-a867-fd0e88551dc7-kube-api-access-7stmb\") pod \"obo-prometheus-operator-668cf9dfbb-2rtjt\" (UID: \"de3cafdb-cdad-4b38-a867-fd0e88551dc7\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-2rtjt" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.682743 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-2rtjt" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.687306 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-b65kf"] Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.688178 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-b65kf" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.692952 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-2s98q" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.693105 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.715173 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-b65kf"] Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.721814 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/11625333-ca33-46bb-9856-a0390b6283bf-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7f5ddff8fb-rv4tn\" (UID: \"11625333-ca33-46bb-9856-a0390b6283bf\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7f5ddff8fb-rv4tn" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.721889 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/11625333-ca33-46bb-9856-a0390b6283bf-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7f5ddff8fb-rv4tn\" (UID: \"11625333-ca33-46bb-9856-a0390b6283bf\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7f5ddff8fb-rv4tn" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.721927 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/91073027-6c2c-4cbf-af6d-bd763b073a0b-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7f5ddff8fb-fjw5g\" (UID: \"91073027-6c2c-4cbf-af6d-bd763b073a0b\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7f5ddff8fb-fjw5g" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.721947 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/91073027-6c2c-4cbf-af6d-bd763b073a0b-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7f5ddff8fb-fjw5g\" (UID: \"91073027-6c2c-4cbf-af6d-bd763b073a0b\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7f5ddff8fb-fjw5g" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.728734 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/91073027-6c2c-4cbf-af6d-bd763b073a0b-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7f5ddff8fb-fjw5g\" (UID: 
\"91073027-6c2c-4cbf-af6d-bd763b073a0b\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7f5ddff8fb-fjw5g" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.729061 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/91073027-6c2c-4cbf-af6d-bd763b073a0b-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7f5ddff8fb-fjw5g\" (UID: \"91073027-6c2c-4cbf-af6d-bd763b073a0b\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7f5ddff8fb-fjw5g" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.730112 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/11625333-ca33-46bb-9856-a0390b6283bf-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7f5ddff8fb-rv4tn\" (UID: \"11625333-ca33-46bb-9856-a0390b6283bf\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7f5ddff8fb-rv4tn" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.730936 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/11625333-ca33-46bb-9856-a0390b6283bf-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7f5ddff8fb-rv4tn\" (UID: \"11625333-ca33-46bb-9856-a0390b6283bf\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7f5ddff8fb-rv4tn" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.820485 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7f5ddff8fb-fjw5g" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.822753 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/c4bfc6b7-63e8-4ab2-a9f6-369332e97f12-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-b65kf\" (UID: \"c4bfc6b7-63e8-4ab2-a9f6-369332e97f12\") " pod="openshift-operators/observability-operator-d8bb48f5d-b65kf" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.822829 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jh2pv\" (UniqueName: \"kubernetes.io/projected/c4bfc6b7-63e8-4ab2-a9f6-369332e97f12-kube-api-access-jh2pv\") pod \"observability-operator-d8bb48f5d-b65kf\" (UID: \"c4bfc6b7-63e8-4ab2-a9f6-369332e97f12\") " pod="openshift-operators/observability-operator-d8bb48f5d-b65kf" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.829953 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7f5ddff8fb-rv4tn" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.891640 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-5446b9c989-bczvw"] Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.892317 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-bczvw" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.895627 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-dqw8w" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.921243 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-bczvw"] Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.924255 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/c4bfc6b7-63e8-4ab2-a9f6-369332e97f12-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-b65kf\" (UID: \"c4bfc6b7-63e8-4ab2-a9f6-369332e97f12\") " pod="openshift-operators/observability-operator-d8bb48f5d-b65kf" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.924378 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jh2pv\" (UniqueName: \"kubernetes.io/projected/c4bfc6b7-63e8-4ab2-a9f6-369332e97f12-kube-api-access-jh2pv\") pod \"observability-operator-d8bb48f5d-b65kf\" (UID: \"c4bfc6b7-63e8-4ab2-a9f6-369332e97f12\") " pod="openshift-operators/observability-operator-d8bb48f5d-b65kf" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.931387 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/c4bfc6b7-63e8-4ab2-a9f6-369332e97f12-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-b65kf\" (UID: \"c4bfc6b7-63e8-4ab2-a9f6-369332e97f12\") " pod="openshift-operators/observability-operator-d8bb48f5d-b65kf" Nov 21 19:12:02 crc kubenswrapper[4701]: I1121 19:12:02.944320 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jh2pv\" (UniqueName: \"kubernetes.io/projected/c4bfc6b7-63e8-4ab2-a9f6-369332e97f12-kube-api-access-jh2pv\") pod \"observability-operator-d8bb48f5d-b65kf\" (UID: \"c4bfc6b7-63e8-4ab2-a9f6-369332e97f12\") " pod="openshift-operators/observability-operator-d8bb48f5d-b65kf" Nov 21 19:12:03 crc kubenswrapper[4701]: I1121 19:12:03.026087 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9htgr\" (UniqueName: \"kubernetes.io/projected/4adf7511-ec5a-47a4-9e69-c8650f1bc017-kube-api-access-9htgr\") pod \"perses-operator-5446b9c989-bczvw\" (UID: \"4adf7511-ec5a-47a4-9e69-c8650f1bc017\") " pod="openshift-operators/perses-operator-5446b9c989-bczvw" Nov 21 19:12:03 crc kubenswrapper[4701]: I1121 19:12:03.026244 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/4adf7511-ec5a-47a4-9e69-c8650f1bc017-openshift-service-ca\") pod \"perses-operator-5446b9c989-bczvw\" (UID: \"4adf7511-ec5a-47a4-9e69-c8650f1bc017\") " pod="openshift-operators/perses-operator-5446b9c989-bczvw" Nov 21 19:12:03 crc kubenswrapper[4701]: I1121 19:12:03.033869 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-2rtjt"] Nov 21 19:12:03 crc kubenswrapper[4701]: I1121 19:12:03.078750 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-b65kf" Nov 21 19:12:03 crc kubenswrapper[4701]: I1121 19:12:03.130060 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9htgr\" (UniqueName: \"kubernetes.io/projected/4adf7511-ec5a-47a4-9e69-c8650f1bc017-kube-api-access-9htgr\") pod \"perses-operator-5446b9c989-bczvw\" (UID: \"4adf7511-ec5a-47a4-9e69-c8650f1bc017\") " pod="openshift-operators/perses-operator-5446b9c989-bczvw" Nov 21 19:12:03 crc kubenswrapper[4701]: I1121 19:12:03.130115 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/4adf7511-ec5a-47a4-9e69-c8650f1bc017-openshift-service-ca\") pod \"perses-operator-5446b9c989-bczvw\" (UID: \"4adf7511-ec5a-47a4-9e69-c8650f1bc017\") " pod="openshift-operators/perses-operator-5446b9c989-bczvw" Nov 21 19:12:03 crc kubenswrapper[4701]: I1121 19:12:03.130995 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/4adf7511-ec5a-47a4-9e69-c8650f1bc017-openshift-service-ca\") pod \"perses-operator-5446b9c989-bczvw\" (UID: \"4adf7511-ec5a-47a4-9e69-c8650f1bc017\") " pod="openshift-operators/perses-operator-5446b9c989-bczvw" Nov 21 19:12:03 crc kubenswrapper[4701]: I1121 19:12:03.149322 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9htgr\" (UniqueName: \"kubernetes.io/projected/4adf7511-ec5a-47a4-9e69-c8650f1bc017-kube-api-access-9htgr\") pod \"perses-operator-5446b9c989-bczvw\" (UID: \"4adf7511-ec5a-47a4-9e69-c8650f1bc017\") " pod="openshift-operators/perses-operator-5446b9c989-bczvw" Nov 21 19:12:03 crc kubenswrapper[4701]: I1121 19:12:03.210681 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-bczvw" Nov 21 19:12:03 crc kubenswrapper[4701]: I1121 19:12:03.435139 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7f5ddff8fb-rv4tn"] Nov 21 19:12:03 crc kubenswrapper[4701]: I1121 19:12:03.441824 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-b65kf"] Nov 21 19:12:03 crc kubenswrapper[4701]: W1121 19:12:03.480962 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc4bfc6b7_63e8_4ab2_a9f6_369332e97f12.slice/crio-b7e8fcc1a737114d7451d1a0df6b8c908c76f491e03508a70b2c5e20f428cf77 WatchSource:0}: Error finding container b7e8fcc1a737114d7451d1a0df6b8c908c76f491e03508a70b2c5e20f428cf77: Status 404 returned error can't find the container with id b7e8fcc1a737114d7451d1a0df6b8c908c76f491e03508a70b2c5e20f428cf77 Nov 21 19:12:03 crc kubenswrapper[4701]: I1121 19:12:03.510415 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7f5ddff8fb-fjw5g"] Nov 21 19:12:03 crc kubenswrapper[4701]: I1121 19:12:03.575938 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-bczvw"] Nov 21 19:12:03 crc kubenswrapper[4701]: W1121 19:12:03.582351 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4adf7511_ec5a_47a4_9e69_c8650f1bc017.slice/crio-2f916a0b9ef77a46fc2f79ad6c7df9c43e1a288e09c53d719c4b146011a61ead WatchSource:0}: Error finding container 2f916a0b9ef77a46fc2f79ad6c7df9c43e1a288e09c53d719c4b146011a61ead: Status 404 returned error can't find the container with id 2f916a0b9ef77a46fc2f79ad6c7df9c43e1a288e09c53d719c4b146011a61ead Nov 21 19:12:03 crc kubenswrapper[4701]: I1121 19:12:03.790182 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7f5ddff8fb-fjw5g" event={"ID":"91073027-6c2c-4cbf-af6d-bd763b073a0b","Type":"ContainerStarted","Data":"b4a3c8d0ad4d7c0f0252fea88d712f926cffefa81773447accb1080dc1271ea5"} Nov 21 19:12:03 crc kubenswrapper[4701]: I1121 19:12:03.791309 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-bczvw" event={"ID":"4adf7511-ec5a-47a4-9e69-c8650f1bc017","Type":"ContainerStarted","Data":"2f916a0b9ef77a46fc2f79ad6c7df9c43e1a288e09c53d719c4b146011a61ead"} Nov 21 19:12:03 crc kubenswrapper[4701]: I1121 19:12:03.792750 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-2rtjt" event={"ID":"de3cafdb-cdad-4b38-a867-fd0e88551dc7","Type":"ContainerStarted","Data":"c6f2845e72b5e6af0720414568da43872ab221fa3a2e00ba52ac599488876859"} Nov 21 19:12:03 crc kubenswrapper[4701]: I1121 19:12:03.794232 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7f5ddff8fb-rv4tn" event={"ID":"11625333-ca33-46bb-9856-a0390b6283bf","Type":"ContainerStarted","Data":"9f1ff31ea639d095aa21dc3799dec6b708bd5cd6dbb228a8b1a997bd94432c21"} Nov 21 19:12:03 crc kubenswrapper[4701]: I1121 19:12:03.795467 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-b65kf" 
event={"ID":"c4bfc6b7-63e8-4ab2-a9f6-369332e97f12","Type":"ContainerStarted","Data":"b7e8fcc1a737114d7451d1a0df6b8c908c76f491e03508a70b2c5e20f428cf77"} Nov 21 19:12:22 crc kubenswrapper[4701]: E1121 19:12:22.739468 4701 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/cluster-observability-operator/cluster-observability-rhel9-operator@sha256:ce7d2904f7b238aa37dfe74a0b76bf73629e7a14fa52bf54b0ecf030ca36f1bb" Nov 21 19:12:22 crc kubenswrapper[4701]: E1121 19:12:22.740682 4701 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:registry.redhat.io/cluster-observability-operator/cluster-observability-rhel9-operator@sha256:ce7d2904f7b238aa37dfe74a0b76bf73629e7a14fa52bf54b0ecf030ca36f1bb,Command:[],Args:[--namespace=$(NAMESPACE) --images=perses=$(RELATED_IMAGE_PERSES) --images=alertmanager=$(RELATED_IMAGE_ALERTMANAGER) --images=prometheus=$(RELATED_IMAGE_PROMETHEUS) --images=thanos=$(RELATED_IMAGE_THANOS) --images=ui-dashboards=$(RELATED_IMAGE_CONSOLE_DASHBOARDS_PLUGIN) --images=ui-distributed-tracing=$(RELATED_IMAGE_CONSOLE_DISTRIBUTED_TRACING_PLUGIN) --images=ui-distributed-tracing-pf5=$(RELATED_IMAGE_CONSOLE_DISTRIBUTED_TRACING_PLUGIN_PF5) --images=ui-distributed-tracing-pf4=$(RELATED_IMAGE_CONSOLE_DISTRIBUTED_TRACING_PLUGIN_PF4) --images=ui-logging=$(RELATED_IMAGE_CONSOLE_LOGGING_PLUGIN) --images=ui-logging-pf4=$(RELATED_IMAGE_CONSOLE_LOGGING_PLUGIN_PF4) --images=ui-troubleshooting-panel=$(RELATED_IMAGE_CONSOLE_TROUBLESHOOTING_PANEL_PLUGIN) --images=ui-monitoring=$(RELATED_IMAGE_CONSOLE_MONITORING_PLUGIN) --images=ui-monitoring-pf5=$(RELATED_IMAGE_CONSOLE_MONITORING_PLUGIN_PF5) --images=korrel8r=$(RELATED_IMAGE_KORREL8R) --images=health-analyzer=$(RELATED_IMAGE_CLUSTER_HEALTH_ANALYZER) 
--openshift.enabled=true],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:RELATED_IMAGE_ALERTMANAGER,Value:registry.redhat.io/cluster-observability-operator/alertmanager-rhel9@sha256:e718854a7d6ca8accf0fa72db0eb902e46c44d747ad51dc3f06bba0cefaa3c01,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PROMETHEUS,Value:registry.redhat.io/cluster-observability-operator/prometheus-rhel9@sha256:17ea20be390a94ab39f5cdd7f0cbc2498046eebcf77fe3dec9aa288d5c2cf46b,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_THANOS,Value:registry.redhat.io/cluster-observability-operator/thanos-rhel9@sha256:d972f4faa5e9c121402d23ed85002f26af48ec36b1b71a7489d677b3913d08b4,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PERSES,Value:registry.redhat.io/cluster-observability-operator/perses-rhel9@sha256:91531137fc1dcd740e277e0f65e120a0176a16f788c14c27925b61aa0b792ade,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CONSOLE_DASHBOARDS_PLUGIN,Value:registry.redhat.io/cluster-observability-operator/dashboards-console-plugin-rhel9@sha256:a69da8bbca8a28dd2925f864d51cc31cf761b10532c553095ba40b242ef701cb,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CONSOLE_DISTRIBUTED_TRACING_PLUGIN,Value:registry.redhat.io/cluster-observability-operator/distributed-tracing-console-plugin-rhel9@sha256:897e1bfad1187062725b54d87107bd0155972257a50d8335dd29e1999b828a4f,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CONSOLE_DISTRIBUTED_TRACING_PLUGIN_PF5,Value:registry.redhat.io/cluster-observability-operator/distributed-tracing-console-plugin-pf5-rhel9@sha256:95fe5b5746ca8c07ac9217ce2d8ac8e6afad17af210f9d8e0074df1310b209a8,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CONSOLE_DISTRIBUTED_TRACING_PLUGIN_PF4,Value:registry.redhat.io/cluster-observability-operator/distributed-tracing-console-plugin-pf4-rhel9@sha256:e9d9a89e4d8126a62b1852055482258ee528cac6398dd5d43ebad75ace0f33c9,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CONSOLE_LOGGING_PLUGIN,Value:registry.redhat.io/cluster-observability-operator/logging-console-plugin-rhel9@sha256:ec684a0645ceb917b019af7ddba68c3533416e356ab0d0320a30e75ca7ebb31b,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CONSOLE_LOGGING_PLUGIN_PF4,Value:registry.redhat.io/cluster-observability-operator/logging-console-plugin-pf4-rhel9@sha256:3b9693fcde9b3a9494fb04735b1f7cfd0426f10be820fdc3f024175c0d3df1c9,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CONSOLE_TROUBLESHOOTING_PANEL_PLUGIN,Value:registry.redhat.io/cluster-observability-operator/troubleshooting-panel-console-plugin-rhel9@sha256:580606f194180accc8abba099e17a26dca7522ec6d233fa2fdd40312771703e3,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CONSOLE_MONITORING_PLUGIN,Value:registry.redhat.io/cluster-observability-operator/monitoring-console-plugin-rhel9@sha256:e03777be39e71701935059cd877603874a13ac94daa73219d4e5e545599d78a9,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CONSOLE_MONITORING_PLUGIN_PF5,Value:registry.redhat.io/cluster-observability-operator/monitoring-console-plugin-pf5-rhel9@sha256:aa47256193cfd2877853878e1ae97d2ab8b8e5deae62b387cbfad02b284d379c,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KORREL8R,Value:registry.redhat.io/cluster-observability-operator/korrel8r-rhel9@sha256:c595ff56b2cb85514bf4784db6ddb82e4e657e3e708a7fb695fc4997379a94d4,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLUSTER_HEALTH_ANALYZER,Value:registry.redhat.io/cluster-observability-operator/cluster-health-analyzer-rhel9@sha256:45a4ec2a519bcec99e886aa91
596d5356a2414a2bd103baaef9fa7838c672eb2,ValueFrom:nil,},EnvVar{Name:OPERATOR_CONDITION_NAME,Value:cluster-observability-operator.v1.3.0,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{400 -3} {} 400m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{100 -3} {} 100m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:observability-operator-tls,ReadOnly:true,MountPath:/etc/tls/private,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jh2pv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000350000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod observability-operator-d8bb48f5d-b65kf_openshift-operators(c4bfc6b7-63e8-4ab2-a9f6-369332e97f12): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 21 19:12:22 crc kubenswrapper[4701]: E1121 19:12:22.742148 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-operators/observability-operator-d8bb48f5d-b65kf" podUID="c4bfc6b7-63e8-4ab2-a9f6-369332e97f12" Nov 21 19:12:22 crc kubenswrapper[4701]: E1121 19:12:22.987407 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/cluster-observability-operator/cluster-observability-rhel9-operator@sha256:ce7d2904f7b238aa37dfe74a0b76bf73629e7a14fa52bf54b0ecf030ca36f1bb\\\"\"" pod="openshift-operators/observability-operator-d8bb48f5d-b65kf" podUID="c4bfc6b7-63e8-4ab2-a9f6-369332e97f12" Nov 21 19:12:23 crc kubenswrapper[4701]: I1121 19:12:23.990734 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-2rtjt" event={"ID":"de3cafdb-cdad-4b38-a867-fd0e88551dc7","Type":"ContainerStarted","Data":"726012f8c9cdb368249e86b671f9c8fff3a8d53b700d25a6caadf51e1647ae8f"} Nov 21 19:12:23 crc kubenswrapper[4701]: I1121 19:12:23.992949 4701 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7f5ddff8fb-rv4tn" event={"ID":"11625333-ca33-46bb-9856-a0390b6283bf","Type":"ContainerStarted","Data":"8ca426df97383392f7bd6b44c81967f64c6ff241fbc8f3413dfab58e4d239e22"} Nov 21 19:12:23 crc kubenswrapper[4701]: I1121 19:12:23.994463 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7f5ddff8fb-fjw5g" event={"ID":"91073027-6c2c-4cbf-af6d-bd763b073a0b","Type":"ContainerStarted","Data":"961616a59f0a127b83a70496b249f8e235df9a31866563090999bbe09ee411a0"} Nov 21 19:12:23 crc kubenswrapper[4701]: I1121 19:12:23.996138 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-bczvw" event={"ID":"4adf7511-ec5a-47a4-9e69-c8650f1bc017","Type":"ContainerStarted","Data":"4cc5576a2f7fde50f140e083ee28948a9abbecf45cf89b5d7bab44bb8381663e"} Nov 21 19:12:23 crc kubenswrapper[4701]: I1121 19:12:23.996402 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5446b9c989-bczvw" Nov 21 19:12:24 crc kubenswrapper[4701]: I1121 19:12:24.015279 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-2rtjt" podStartSLOduration=2.293459308 podStartE2EDuration="22.015255578s" podCreationTimestamp="2025-11-21 19:12:02 +0000 UTC" firstStartedPulling="2025-11-21 19:12:03.051941669 +0000 UTC m=+613.837081696" lastFinishedPulling="2025-11-21 19:12:22.773737939 +0000 UTC m=+633.558877966" observedRunningTime="2025-11-21 19:12:24.010509548 +0000 UTC m=+634.795649585" watchObservedRunningTime="2025-11-21 19:12:24.015255578 +0000 UTC m=+634.800395615" Nov 21 19:12:24 crc kubenswrapper[4701]: I1121 19:12:24.046924 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7f5ddff8fb-fjw5g" podStartSLOduration=2.772855399 podStartE2EDuration="22.046894129s" podCreationTimestamp="2025-11-21 19:12:02 +0000 UTC" firstStartedPulling="2025-11-21 19:12:03.520471625 +0000 UTC m=+614.305611652" lastFinishedPulling="2025-11-21 19:12:22.794510365 +0000 UTC m=+633.579650382" observedRunningTime="2025-11-21 19:12:24.041299697 +0000 UTC m=+634.826439724" watchObservedRunningTime="2025-11-21 19:12:24.046894129 +0000 UTC m=+634.832034166" Nov 21 19:12:24 crc kubenswrapper[4701]: I1121 19:12:24.076563 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7f5ddff8fb-rv4tn" podStartSLOduration=2.76661023 podStartE2EDuration="22.076548648s" podCreationTimestamp="2025-11-21 19:12:02 +0000 UTC" firstStartedPulling="2025-11-21 19:12:03.450344154 +0000 UTC m=+614.235484181" lastFinishedPulling="2025-11-21 19:12:22.760282582 +0000 UTC m=+633.545422599" observedRunningTime="2025-11-21 19:12:24.075704004 +0000 UTC m=+634.860844021" watchObservedRunningTime="2025-11-21 19:12:24.076548648 +0000 UTC m=+634.861688665" Nov 21 19:12:24 crc kubenswrapper[4701]: I1121 19:12:24.119911 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-5446b9c989-bczvw" podStartSLOduration=2.9630232899999998 podStartE2EDuration="22.119894978s" podCreationTimestamp="2025-11-21 19:12:02 +0000 UTC" firstStartedPulling="2025-11-21 19:12:03.617414956 +0000 UTC m=+614.402554983" 
lastFinishedPulling="2025-11-21 19:12:22.774286644 +0000 UTC m=+633.559426671" observedRunningTime="2025-11-21 19:12:24.117290007 +0000 UTC m=+634.902430034" watchObservedRunningTime="2025-11-21 19:12:24.119894978 +0000 UTC m=+634.905035005" Nov 21 19:12:33 crc kubenswrapper[4701]: I1121 19:12:33.214866 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5446b9c989-bczvw" Nov 21 19:12:37 crc kubenswrapper[4701]: I1121 19:12:37.142904 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-b65kf" event={"ID":"c4bfc6b7-63e8-4ab2-a9f6-369332e97f12","Type":"ContainerStarted","Data":"12b8402173c0b0c1fedf4622bd9cc2828b393f47d5b60f35cef9b0be74342e94"} Nov 21 19:12:37 crc kubenswrapper[4701]: I1121 19:12:37.144113 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-d8bb48f5d-b65kf" Nov 21 19:12:37 crc kubenswrapper[4701]: I1121 19:12:37.145832 4701 patch_prober.go:28] interesting pod/observability-operator-d8bb48f5d-b65kf container/operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.44:8081/healthz\": dial tcp 10.217.0.44:8081: connect: connection refused" start-of-body= Nov 21 19:12:37 crc kubenswrapper[4701]: I1121 19:12:37.145903 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/observability-operator-d8bb48f5d-b65kf" podUID="c4bfc6b7-63e8-4ab2-a9f6-369332e97f12" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.44:8081/healthz\": dial tcp 10.217.0.44:8081: connect: connection refused" Nov 21 19:12:37 crc kubenswrapper[4701]: I1121 19:12:37.168867 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-d8bb48f5d-b65kf" podStartSLOduration=1.792210919 podStartE2EDuration="35.16884183s" podCreationTimestamp="2025-11-21 19:12:02 +0000 UTC" firstStartedPulling="2025-11-21 19:12:03.486175431 +0000 UTC m=+614.271315458" lastFinishedPulling="2025-11-21 19:12:36.862806302 +0000 UTC m=+647.647946369" observedRunningTime="2025-11-21 19:12:37.166844136 +0000 UTC m=+647.951984163" watchObservedRunningTime="2025-11-21 19:12:37.16884183 +0000 UTC m=+647.953981867" Nov 21 19:12:38 crc kubenswrapper[4701]: I1121 19:12:38.152485 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-d8bb48f5d-b65kf" Nov 21 19:12:56 crc kubenswrapper[4701]: I1121 19:12:56.933696 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w"] Nov 21 19:12:56 crc kubenswrapper[4701]: I1121 19:12:56.935527 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w" Nov 21 19:12:56 crc kubenswrapper[4701]: I1121 19:12:56.940035 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 21 19:12:56 crc kubenswrapper[4701]: I1121 19:12:56.974709 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w"] Nov 21 19:12:57 crc kubenswrapper[4701]: I1121 19:12:57.020634 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/dbcf8276-4c26-4faa-85dc-abc66d2004a6-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w\" (UID: \"dbcf8276-4c26-4faa-85dc-abc66d2004a6\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w" Nov 21 19:12:57 crc kubenswrapper[4701]: I1121 19:12:57.020725 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/dbcf8276-4c26-4faa-85dc-abc66d2004a6-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w\" (UID: \"dbcf8276-4c26-4faa-85dc-abc66d2004a6\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w" Nov 21 19:12:57 crc kubenswrapper[4701]: I1121 19:12:57.020760 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l9q2k\" (UniqueName: \"kubernetes.io/projected/dbcf8276-4c26-4faa-85dc-abc66d2004a6-kube-api-access-l9q2k\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w\" (UID: \"dbcf8276-4c26-4faa-85dc-abc66d2004a6\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w" Nov 21 19:12:57 crc kubenswrapper[4701]: I1121 19:12:57.122506 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/dbcf8276-4c26-4faa-85dc-abc66d2004a6-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w\" (UID: \"dbcf8276-4c26-4faa-85dc-abc66d2004a6\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w" Nov 21 19:12:57 crc kubenswrapper[4701]: I1121 19:12:57.122668 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/dbcf8276-4c26-4faa-85dc-abc66d2004a6-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w\" (UID: \"dbcf8276-4c26-4faa-85dc-abc66d2004a6\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w" Nov 21 19:12:57 crc kubenswrapper[4701]: I1121 19:12:57.122747 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l9q2k\" (UniqueName: \"kubernetes.io/projected/dbcf8276-4c26-4faa-85dc-abc66d2004a6-kube-api-access-l9q2k\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w\" (UID: \"dbcf8276-4c26-4faa-85dc-abc66d2004a6\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w" Nov 21 19:12:57 crc kubenswrapper[4701]: I1121 19:12:57.123192 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/dbcf8276-4c26-4faa-85dc-abc66d2004a6-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w\" (UID: \"dbcf8276-4c26-4faa-85dc-abc66d2004a6\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w" Nov 21 19:12:57 crc kubenswrapper[4701]: I1121 19:12:57.123274 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/dbcf8276-4c26-4faa-85dc-abc66d2004a6-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w\" (UID: \"dbcf8276-4c26-4faa-85dc-abc66d2004a6\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w" Nov 21 19:12:57 crc kubenswrapper[4701]: I1121 19:12:57.143288 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l9q2k\" (UniqueName: \"kubernetes.io/projected/dbcf8276-4c26-4faa-85dc-abc66d2004a6-kube-api-access-l9q2k\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w\" (UID: \"dbcf8276-4c26-4faa-85dc-abc66d2004a6\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w" Nov 21 19:12:57 crc kubenswrapper[4701]: I1121 19:12:57.262553 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w" Nov 21 19:12:57 crc kubenswrapper[4701]: I1121 19:12:57.807408 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w"] Nov 21 19:12:57 crc kubenswrapper[4701]: W1121 19:12:57.818055 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddbcf8276_4c26_4faa_85dc_abc66d2004a6.slice/crio-27d243bc36ac5412f78cdc9c5f85fef82f4830f86116b48dea2383ca145a0a58 WatchSource:0}: Error finding container 27d243bc36ac5412f78cdc9c5f85fef82f4830f86116b48dea2383ca145a0a58: Status 404 returned error can't find the container with id 27d243bc36ac5412f78cdc9c5f85fef82f4830f86116b48dea2383ca145a0a58 Nov 21 19:12:58 crc kubenswrapper[4701]: I1121 19:12:58.291956 4701 generic.go:334] "Generic (PLEG): container finished" podID="dbcf8276-4c26-4faa-85dc-abc66d2004a6" containerID="36dc6a2292fdc80dd62ddd611b08eef07f8daa771048532363518432857f9124" exitCode=0 Nov 21 19:12:58 crc kubenswrapper[4701]: I1121 19:12:58.292035 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w" event={"ID":"dbcf8276-4c26-4faa-85dc-abc66d2004a6","Type":"ContainerDied","Data":"36dc6a2292fdc80dd62ddd611b08eef07f8daa771048532363518432857f9124"} Nov 21 19:12:58 crc kubenswrapper[4701]: I1121 19:12:58.292083 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w" event={"ID":"dbcf8276-4c26-4faa-85dc-abc66d2004a6","Type":"ContainerStarted","Data":"27d243bc36ac5412f78cdc9c5f85fef82f4830f86116b48dea2383ca145a0a58"} Nov 21 19:13:00 crc kubenswrapper[4701]: E1121 19:13:00.076258 4701 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddbcf8276_4c26_4faa_85dc_abc66d2004a6.slice/crio-d807a787a45ca25f5d07d2ed8c2563553836517e8ef1cc8774178d275ce95a2a.scope\": RecentStats: unable to find 
data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddbcf8276_4c26_4faa_85dc_abc66d2004a6.slice/crio-conmon-d807a787a45ca25f5d07d2ed8c2563553836517e8ef1cc8774178d275ce95a2a.scope\": RecentStats: unable to find data in memory cache]" Nov 21 19:13:00 crc kubenswrapper[4701]: I1121 19:13:00.320318 4701 generic.go:334] "Generic (PLEG): container finished" podID="dbcf8276-4c26-4faa-85dc-abc66d2004a6" containerID="d807a787a45ca25f5d07d2ed8c2563553836517e8ef1cc8774178d275ce95a2a" exitCode=0 Nov 21 19:13:00 crc kubenswrapper[4701]: I1121 19:13:00.320408 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w" event={"ID":"dbcf8276-4c26-4faa-85dc-abc66d2004a6","Type":"ContainerDied","Data":"d807a787a45ca25f5d07d2ed8c2563553836517e8ef1cc8774178d275ce95a2a"} Nov 21 19:13:01 crc kubenswrapper[4701]: I1121 19:13:01.334899 4701 generic.go:334] "Generic (PLEG): container finished" podID="dbcf8276-4c26-4faa-85dc-abc66d2004a6" containerID="d90c18ae86e9c57aae348e937f536f5915481022e79d9ecda4f5f6ad9b601c4a" exitCode=0 Nov 21 19:13:01 crc kubenswrapper[4701]: I1121 19:13:01.334998 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w" event={"ID":"dbcf8276-4c26-4faa-85dc-abc66d2004a6","Type":"ContainerDied","Data":"d90c18ae86e9c57aae348e937f536f5915481022e79d9ecda4f5f6ad9b601c4a"} Nov 21 19:13:02 crc kubenswrapper[4701]: I1121 19:13:02.709132 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w" Nov 21 19:13:02 crc kubenswrapper[4701]: I1121 19:13:02.817976 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/dbcf8276-4c26-4faa-85dc-abc66d2004a6-util\") pod \"dbcf8276-4c26-4faa-85dc-abc66d2004a6\" (UID: \"dbcf8276-4c26-4faa-85dc-abc66d2004a6\") " Nov 21 19:13:02 crc kubenswrapper[4701]: I1121 19:13:02.818112 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/dbcf8276-4c26-4faa-85dc-abc66d2004a6-bundle\") pod \"dbcf8276-4c26-4faa-85dc-abc66d2004a6\" (UID: \"dbcf8276-4c26-4faa-85dc-abc66d2004a6\") " Nov 21 19:13:02 crc kubenswrapper[4701]: I1121 19:13:02.818252 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l9q2k\" (UniqueName: \"kubernetes.io/projected/dbcf8276-4c26-4faa-85dc-abc66d2004a6-kube-api-access-l9q2k\") pod \"dbcf8276-4c26-4faa-85dc-abc66d2004a6\" (UID: \"dbcf8276-4c26-4faa-85dc-abc66d2004a6\") " Nov 21 19:13:02 crc kubenswrapper[4701]: I1121 19:13:02.819440 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dbcf8276-4c26-4faa-85dc-abc66d2004a6-bundle" (OuterVolumeSpecName: "bundle") pod "dbcf8276-4c26-4faa-85dc-abc66d2004a6" (UID: "dbcf8276-4c26-4faa-85dc-abc66d2004a6"). InnerVolumeSpecName "bundle". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:13:02 crc kubenswrapper[4701]: I1121 19:13:02.828779 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dbcf8276-4c26-4faa-85dc-abc66d2004a6-kube-api-access-l9q2k" (OuterVolumeSpecName: "kube-api-access-l9q2k") pod "dbcf8276-4c26-4faa-85dc-abc66d2004a6" (UID: "dbcf8276-4c26-4faa-85dc-abc66d2004a6"). InnerVolumeSpecName "kube-api-access-l9q2k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:13:02 crc kubenswrapper[4701]: I1121 19:13:02.846182 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dbcf8276-4c26-4faa-85dc-abc66d2004a6-util" (OuterVolumeSpecName: "util") pod "dbcf8276-4c26-4faa-85dc-abc66d2004a6" (UID: "dbcf8276-4c26-4faa-85dc-abc66d2004a6"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:13:02 crc kubenswrapper[4701]: I1121 19:13:02.920235 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l9q2k\" (UniqueName: \"kubernetes.io/projected/dbcf8276-4c26-4faa-85dc-abc66d2004a6-kube-api-access-l9q2k\") on node \"crc\" DevicePath \"\"" Nov 21 19:13:02 crc kubenswrapper[4701]: I1121 19:13:02.920291 4701 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/dbcf8276-4c26-4faa-85dc-abc66d2004a6-util\") on node \"crc\" DevicePath \"\"" Nov 21 19:13:02 crc kubenswrapper[4701]: I1121 19:13:02.920315 4701 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/dbcf8276-4c26-4faa-85dc-abc66d2004a6-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:13:03 crc kubenswrapper[4701]: I1121 19:13:03.367727 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w" event={"ID":"dbcf8276-4c26-4faa-85dc-abc66d2004a6","Type":"ContainerDied","Data":"27d243bc36ac5412f78cdc9c5f85fef82f4830f86116b48dea2383ca145a0a58"} Nov 21 19:13:03 crc kubenswrapper[4701]: I1121 19:13:03.367805 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="27d243bc36ac5412f78cdc9c5f85fef82f4830f86116b48dea2383ca145a0a58" Nov 21 19:13:03 crc kubenswrapper[4701]: I1121 19:13:03.367892 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w" Nov 21 19:13:07 crc kubenswrapper[4701]: I1121 19:13:07.113002 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-6fdlz"] Nov 21 19:13:07 crc kubenswrapper[4701]: E1121 19:13:07.114083 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbcf8276-4c26-4faa-85dc-abc66d2004a6" containerName="pull" Nov 21 19:13:07 crc kubenswrapper[4701]: I1121 19:13:07.114114 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbcf8276-4c26-4faa-85dc-abc66d2004a6" containerName="pull" Nov 21 19:13:07 crc kubenswrapper[4701]: E1121 19:13:07.114160 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbcf8276-4c26-4faa-85dc-abc66d2004a6" containerName="util" Nov 21 19:13:07 crc kubenswrapper[4701]: I1121 19:13:07.114178 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbcf8276-4c26-4faa-85dc-abc66d2004a6" containerName="util" Nov 21 19:13:07 crc kubenswrapper[4701]: E1121 19:13:07.114193 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbcf8276-4c26-4faa-85dc-abc66d2004a6" containerName="extract" Nov 21 19:13:07 crc kubenswrapper[4701]: I1121 19:13:07.114259 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbcf8276-4c26-4faa-85dc-abc66d2004a6" containerName="extract" Nov 21 19:13:07 crc kubenswrapper[4701]: I1121 19:13:07.114744 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="dbcf8276-4c26-4faa-85dc-abc66d2004a6" containerName="extract" Nov 21 19:13:07 crc kubenswrapper[4701]: I1121 19:13:07.115467 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-6fdlz" Nov 21 19:13:07 crc kubenswrapper[4701]: I1121 19:13:07.119158 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-bks2v" Nov 21 19:13:07 crc kubenswrapper[4701]: I1121 19:13:07.120301 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Nov 21 19:13:07 crc kubenswrapper[4701]: I1121 19:13:07.125260 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Nov 21 19:13:07 crc kubenswrapper[4701]: I1121 19:13:07.125927 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-6fdlz"] Nov 21 19:13:07 crc kubenswrapper[4701]: I1121 19:13:07.193017 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r2mhv\" (UniqueName: \"kubernetes.io/projected/22701628-3f03-4106-ad9e-1b727e2b7c08-kube-api-access-r2mhv\") pod \"nmstate-operator-557fdffb88-6fdlz\" (UID: \"22701628-3f03-4106-ad9e-1b727e2b7c08\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-6fdlz" Nov 21 19:13:07 crc kubenswrapper[4701]: I1121 19:13:07.294143 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r2mhv\" (UniqueName: \"kubernetes.io/projected/22701628-3f03-4106-ad9e-1b727e2b7c08-kube-api-access-r2mhv\") pod \"nmstate-operator-557fdffb88-6fdlz\" (UID: \"22701628-3f03-4106-ad9e-1b727e2b7c08\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-6fdlz" Nov 21 19:13:07 crc kubenswrapper[4701]: I1121 19:13:07.313277 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r2mhv\" 
(UniqueName: \"kubernetes.io/projected/22701628-3f03-4106-ad9e-1b727e2b7c08-kube-api-access-r2mhv\") pod \"nmstate-operator-557fdffb88-6fdlz\" (UID: \"22701628-3f03-4106-ad9e-1b727e2b7c08\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-6fdlz" Nov 21 19:13:07 crc kubenswrapper[4701]: I1121 19:13:07.435287 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-6fdlz" Nov 21 19:13:07 crc kubenswrapper[4701]: I1121 19:13:07.746165 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-6fdlz"] Nov 21 19:13:08 crc kubenswrapper[4701]: I1121 19:13:08.408505 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-6fdlz" event={"ID":"22701628-3f03-4106-ad9e-1b727e2b7c08","Type":"ContainerStarted","Data":"6ebae7c21d993f48ff2b592cbe0e34eb3a8700ca2a8b246650afccd2ba240965"} Nov 21 19:13:11 crc kubenswrapper[4701]: I1121 19:13:11.430656 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-6fdlz" event={"ID":"22701628-3f03-4106-ad9e-1b727e2b7c08","Type":"ContainerStarted","Data":"a6f5998f07fc7699d41217a1330c23d28b3e826097a2e0b0ea0ef5af3dbf246a"} Nov 21 19:13:11 crc kubenswrapper[4701]: I1121 19:13:11.458898 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-557fdffb88-6fdlz" podStartSLOduration=1.9146817189999998 podStartE2EDuration="4.458871837s" podCreationTimestamp="2025-11-21 19:13:07 +0000 UTC" firstStartedPulling="2025-11-21 19:13:07.75971337 +0000 UTC m=+678.544853407" lastFinishedPulling="2025-11-21 19:13:10.303903498 +0000 UTC m=+681.089043525" observedRunningTime="2025-11-21 19:13:11.454312833 +0000 UTC m=+682.239452860" watchObservedRunningTime="2025-11-21 19:13:11.458871837 +0000 UTC m=+682.244011904" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.115073 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-p7n5j"] Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.116516 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-p7n5j" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.118581 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-c978k" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.123826 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-pthql"] Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.124650 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-pthql" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.162759 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.166540 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-p7n5j"] Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.169290 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-pthql"] Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.175162 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-7sdtf"] Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.175910 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-7sdtf" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.176374 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s85sw\" (UniqueName: \"kubernetes.io/projected/51b03b0f-062b-45d4-95b9-f965e2b69d80-kube-api-access-s85sw\") pod \"nmstate-metrics-5dcf9c57c5-p7n5j\" (UID: \"51b03b0f-062b-45d4-95b9-f965e2b69d80\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-p7n5j" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.176410 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/45825b15-9674-46b3-b29e-7d78c4de3274-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-pthql\" (UID: \"45825b15-9674-46b3-b29e-7d78c4de3274\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-pthql" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.176456 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2v8t\" (UniqueName: \"kubernetes.io/projected/45825b15-9674-46b3-b29e-7d78c4de3274-kube-api-access-t2v8t\") pod \"nmstate-webhook-6b89b748d8-pthql\" (UID: \"45825b15-9674-46b3-b29e-7d78c4de3274\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-pthql" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.277782 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s85sw\" (UniqueName: \"kubernetes.io/projected/51b03b0f-062b-45d4-95b9-f965e2b69d80-kube-api-access-s85sw\") pod \"nmstate-metrics-5dcf9c57c5-p7n5j\" (UID: \"51b03b0f-062b-45d4-95b9-f965e2b69d80\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-p7n5j" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.277841 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/ff85cf8f-c850-4455-92b8-c7bc1c548e68-dbus-socket\") pod \"nmstate-handler-7sdtf\" (UID: \"ff85cf8f-c850-4455-92b8-c7bc1c548e68\") " pod="openshift-nmstate/nmstate-handler-7sdtf" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.277867 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/45825b15-9674-46b3-b29e-7d78c4de3274-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-pthql\" (UID: \"45825b15-9674-46b3-b29e-7d78c4de3274\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-pthql" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.277887 4701 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/ff85cf8f-c850-4455-92b8-c7bc1c548e68-ovs-socket\") pod \"nmstate-handler-7sdtf\" (UID: \"ff85cf8f-c850-4455-92b8-c7bc1c548e68\") " pod="openshift-nmstate/nmstate-handler-7sdtf" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.277915 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bdww8\" (UniqueName: \"kubernetes.io/projected/ff85cf8f-c850-4455-92b8-c7bc1c548e68-kube-api-access-bdww8\") pod \"nmstate-handler-7sdtf\" (UID: \"ff85cf8f-c850-4455-92b8-c7bc1c548e68\") " pod="openshift-nmstate/nmstate-handler-7sdtf" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.277968 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2v8t\" (UniqueName: \"kubernetes.io/projected/45825b15-9674-46b3-b29e-7d78c4de3274-kube-api-access-t2v8t\") pod \"nmstate-webhook-6b89b748d8-pthql\" (UID: \"45825b15-9674-46b3-b29e-7d78c4de3274\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-pthql" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.277999 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/ff85cf8f-c850-4455-92b8-c7bc1c548e68-nmstate-lock\") pod \"nmstate-handler-7sdtf\" (UID: \"ff85cf8f-c850-4455-92b8-c7bc1c548e68\") " pod="openshift-nmstate/nmstate-handler-7sdtf" Nov 21 19:13:18 crc kubenswrapper[4701]: E1121 19:13:18.278101 4701 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Nov 21 19:13:18 crc kubenswrapper[4701]: E1121 19:13:18.278242 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/45825b15-9674-46b3-b29e-7d78c4de3274-tls-key-pair podName:45825b15-9674-46b3-b29e-7d78c4de3274 nodeName:}" failed. No retries permitted until 2025-11-21 19:13:18.778211049 +0000 UTC m=+689.563351076 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/45825b15-9674-46b3-b29e-7d78c4de3274-tls-key-pair") pod "nmstate-webhook-6b89b748d8-pthql" (UID: "45825b15-9674-46b3-b29e-7d78c4de3274") : secret "openshift-nmstate-webhook" not found Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.285673 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-mxxhh"] Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.286419 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-mxxhh" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.290756 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.290923 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-rq5n2" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.291135 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.308305 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2v8t\" (UniqueName: \"kubernetes.io/projected/45825b15-9674-46b3-b29e-7d78c4de3274-kube-api-access-t2v8t\") pod \"nmstate-webhook-6b89b748d8-pthql\" (UID: \"45825b15-9674-46b3-b29e-7d78c4de3274\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-pthql" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.312072 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s85sw\" (UniqueName: \"kubernetes.io/projected/51b03b0f-062b-45d4-95b9-f965e2b69d80-kube-api-access-s85sw\") pod \"nmstate-metrics-5dcf9c57c5-p7n5j\" (UID: \"51b03b0f-062b-45d4-95b9-f965e2b69d80\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-p7n5j" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.326757 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-mxxhh"] Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.379549 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dlchs\" (UniqueName: \"kubernetes.io/projected/cc6e0ff9-3b8f-403f-9f52-52808c29059d-kube-api-access-dlchs\") pod \"nmstate-console-plugin-5874bd7bc5-mxxhh\" (UID: \"cc6e0ff9-3b8f-403f-9f52-52808c29059d\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-mxxhh" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.379592 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bdww8\" (UniqueName: \"kubernetes.io/projected/ff85cf8f-c850-4455-92b8-c7bc1c548e68-kube-api-access-bdww8\") pod \"nmstate-handler-7sdtf\" (UID: \"ff85cf8f-c850-4455-92b8-c7bc1c548e68\") " pod="openshift-nmstate/nmstate-handler-7sdtf" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.379630 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/cc6e0ff9-3b8f-403f-9f52-52808c29059d-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-mxxhh\" (UID: \"cc6e0ff9-3b8f-403f-9f52-52808c29059d\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-mxxhh" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.379661 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/cc6e0ff9-3b8f-403f-9f52-52808c29059d-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-mxxhh\" (UID: \"cc6e0ff9-3b8f-403f-9f52-52808c29059d\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-mxxhh" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.379678 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: 
\"kubernetes.io/host-path/ff85cf8f-c850-4455-92b8-c7bc1c548e68-nmstate-lock\") pod \"nmstate-handler-7sdtf\" (UID: \"ff85cf8f-c850-4455-92b8-c7bc1c548e68\") " pod="openshift-nmstate/nmstate-handler-7sdtf" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.379719 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/ff85cf8f-c850-4455-92b8-c7bc1c548e68-dbus-socket\") pod \"nmstate-handler-7sdtf\" (UID: \"ff85cf8f-c850-4455-92b8-c7bc1c548e68\") " pod="openshift-nmstate/nmstate-handler-7sdtf" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.379750 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/ff85cf8f-c850-4455-92b8-c7bc1c548e68-ovs-socket\") pod \"nmstate-handler-7sdtf\" (UID: \"ff85cf8f-c850-4455-92b8-c7bc1c548e68\") " pod="openshift-nmstate/nmstate-handler-7sdtf" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.379809 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/ff85cf8f-c850-4455-92b8-c7bc1c548e68-ovs-socket\") pod \"nmstate-handler-7sdtf\" (UID: \"ff85cf8f-c850-4455-92b8-c7bc1c548e68\") " pod="openshift-nmstate/nmstate-handler-7sdtf" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.380080 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/ff85cf8f-c850-4455-92b8-c7bc1c548e68-nmstate-lock\") pod \"nmstate-handler-7sdtf\" (UID: \"ff85cf8f-c850-4455-92b8-c7bc1c548e68\") " pod="openshift-nmstate/nmstate-handler-7sdtf" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.380523 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/ff85cf8f-c850-4455-92b8-c7bc1c548e68-dbus-socket\") pod \"nmstate-handler-7sdtf\" (UID: \"ff85cf8f-c850-4455-92b8-c7bc1c548e68\") " pod="openshift-nmstate/nmstate-handler-7sdtf" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.404467 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bdww8\" (UniqueName: \"kubernetes.io/projected/ff85cf8f-c850-4455-92b8-c7bc1c548e68-kube-api-access-bdww8\") pod \"nmstate-handler-7sdtf\" (UID: \"ff85cf8f-c850-4455-92b8-c7bc1c548e68\") " pod="openshift-nmstate/nmstate-handler-7sdtf" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.466375 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-p7n5j" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.481279 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dlchs\" (UniqueName: \"kubernetes.io/projected/cc6e0ff9-3b8f-403f-9f52-52808c29059d-kube-api-access-dlchs\") pod \"nmstate-console-plugin-5874bd7bc5-mxxhh\" (UID: \"cc6e0ff9-3b8f-403f-9f52-52808c29059d\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-mxxhh" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.481333 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/cc6e0ff9-3b8f-403f-9f52-52808c29059d-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-mxxhh\" (UID: \"cc6e0ff9-3b8f-403f-9f52-52808c29059d\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-mxxhh" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.481380 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/cc6e0ff9-3b8f-403f-9f52-52808c29059d-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-mxxhh\" (UID: \"cc6e0ff9-3b8f-403f-9f52-52808c29059d\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-mxxhh" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.482261 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/cc6e0ff9-3b8f-403f-9f52-52808c29059d-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-mxxhh\" (UID: \"cc6e0ff9-3b8f-403f-9f52-52808c29059d\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-mxxhh" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.484086 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-6b4bd59897-k2pk2"] Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.484872 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-6b4bd59897-k2pk2" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.485002 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/cc6e0ff9-3b8f-403f-9f52-52808c29059d-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-mxxhh\" (UID: \"cc6e0ff9-3b8f-403f-9f52-52808c29059d\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-mxxhh" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.503033 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-7sdtf" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.507514 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-6b4bd59897-k2pk2"] Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.514165 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dlchs\" (UniqueName: \"kubernetes.io/projected/cc6e0ff9-3b8f-403f-9f52-52808c29059d-kube-api-access-dlchs\") pod \"nmstate-console-plugin-5874bd7bc5-mxxhh\" (UID: \"cc6e0ff9-3b8f-403f-9f52-52808c29059d\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-mxxhh" Nov 21 19:13:18 crc kubenswrapper[4701]: W1121 19:13:18.536428 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podff85cf8f_c850_4455_92b8_c7bc1c548e68.slice/crio-a853aaba04508d21c260bef89341be7a02a70321695a7437778805ffd0acf6ef WatchSource:0}: Error finding container a853aaba04508d21c260bef89341be7a02a70321695a7437778805ffd0acf6ef: Status 404 returned error can't find the container with id a853aaba04508d21c260bef89341be7a02a70321695a7437778805ffd0acf6ef Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.582832 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/d9468cb2-7324-496f-a1a2-a88228cf462d-console-config\") pod \"console-6b4bd59897-k2pk2\" (UID: \"d9468cb2-7324-496f-a1a2-a88228cf462d\") " pod="openshift-console/console-6b4bd59897-k2pk2" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.582898 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/d9468cb2-7324-496f-a1a2-a88228cf462d-console-oauth-config\") pod \"console-6b4bd59897-k2pk2\" (UID: \"d9468cb2-7324-496f-a1a2-a88228cf462d\") " pod="openshift-console/console-6b4bd59897-k2pk2" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.582918 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d9468cb2-7324-496f-a1a2-a88228cf462d-service-ca\") pod \"console-6b4bd59897-k2pk2\" (UID: \"d9468cb2-7324-496f-a1a2-a88228cf462d\") " pod="openshift-console/console-6b4bd59897-k2pk2" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.582942 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d9468cb2-7324-496f-a1a2-a88228cf462d-trusted-ca-bundle\") pod \"console-6b4bd59897-k2pk2\" (UID: \"d9468cb2-7324-496f-a1a2-a88228cf462d\") " pod="openshift-console/console-6b4bd59897-k2pk2" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.582969 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/d9468cb2-7324-496f-a1a2-a88228cf462d-oauth-serving-cert\") pod \"console-6b4bd59897-k2pk2\" (UID: \"d9468cb2-7324-496f-a1a2-a88228cf462d\") " pod="openshift-console/console-6b4bd59897-k2pk2" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.583013 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/d9468cb2-7324-496f-a1a2-a88228cf462d-console-serving-cert\") pod 
\"console-6b4bd59897-k2pk2\" (UID: \"d9468cb2-7324-496f-a1a2-a88228cf462d\") " pod="openshift-console/console-6b4bd59897-k2pk2" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.583034 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qj8zm\" (UniqueName: \"kubernetes.io/projected/d9468cb2-7324-496f-a1a2-a88228cf462d-kube-api-access-qj8zm\") pod \"console-6b4bd59897-k2pk2\" (UID: \"d9468cb2-7324-496f-a1a2-a88228cf462d\") " pod="openshift-console/console-6b4bd59897-k2pk2" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.616375 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.616847 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.646900 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-mxxhh" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.684645 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/d9468cb2-7324-496f-a1a2-a88228cf462d-console-config\") pod \"console-6b4bd59897-k2pk2\" (UID: \"d9468cb2-7324-496f-a1a2-a88228cf462d\") " pod="openshift-console/console-6b4bd59897-k2pk2" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.684715 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/d9468cb2-7324-496f-a1a2-a88228cf462d-console-oauth-config\") pod \"console-6b4bd59897-k2pk2\" (UID: \"d9468cb2-7324-496f-a1a2-a88228cf462d\") " pod="openshift-console/console-6b4bd59897-k2pk2" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.684757 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d9468cb2-7324-496f-a1a2-a88228cf462d-service-ca\") pod \"console-6b4bd59897-k2pk2\" (UID: \"d9468cb2-7324-496f-a1a2-a88228cf462d\") " pod="openshift-console/console-6b4bd59897-k2pk2" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.684806 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d9468cb2-7324-496f-a1a2-a88228cf462d-trusted-ca-bundle\") pod \"console-6b4bd59897-k2pk2\" (UID: \"d9468cb2-7324-496f-a1a2-a88228cf462d\") " pod="openshift-console/console-6b4bd59897-k2pk2" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.684835 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/d9468cb2-7324-496f-a1a2-a88228cf462d-oauth-serving-cert\") pod \"console-6b4bd59897-k2pk2\" (UID: \"d9468cb2-7324-496f-a1a2-a88228cf462d\") " pod="openshift-console/console-6b4bd59897-k2pk2" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.684905 4701 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/d9468cb2-7324-496f-a1a2-a88228cf462d-console-serving-cert\") pod \"console-6b4bd59897-k2pk2\" (UID: \"d9468cb2-7324-496f-a1a2-a88228cf462d\") " pod="openshift-console/console-6b4bd59897-k2pk2" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.684948 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qj8zm\" (UniqueName: \"kubernetes.io/projected/d9468cb2-7324-496f-a1a2-a88228cf462d-kube-api-access-qj8zm\") pod \"console-6b4bd59897-k2pk2\" (UID: \"d9468cb2-7324-496f-a1a2-a88228cf462d\") " pod="openshift-console/console-6b4bd59897-k2pk2" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.688182 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/d9468cb2-7324-496f-a1a2-a88228cf462d-console-config\") pod \"console-6b4bd59897-k2pk2\" (UID: \"d9468cb2-7324-496f-a1a2-a88228cf462d\") " pod="openshift-console/console-6b4bd59897-k2pk2" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.688475 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/d9468cb2-7324-496f-a1a2-a88228cf462d-oauth-serving-cert\") pod \"console-6b4bd59897-k2pk2\" (UID: \"d9468cb2-7324-496f-a1a2-a88228cf462d\") " pod="openshift-console/console-6b4bd59897-k2pk2" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.688987 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d9468cb2-7324-496f-a1a2-a88228cf462d-service-ca\") pod \"console-6b4bd59897-k2pk2\" (UID: \"d9468cb2-7324-496f-a1a2-a88228cf462d\") " pod="openshift-console/console-6b4bd59897-k2pk2" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.689022 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d9468cb2-7324-496f-a1a2-a88228cf462d-trusted-ca-bundle\") pod \"console-6b4bd59897-k2pk2\" (UID: \"d9468cb2-7324-496f-a1a2-a88228cf462d\") " pod="openshift-console/console-6b4bd59897-k2pk2" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.694603 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/d9468cb2-7324-496f-a1a2-a88228cf462d-console-oauth-config\") pod \"console-6b4bd59897-k2pk2\" (UID: \"d9468cb2-7324-496f-a1a2-a88228cf462d\") " pod="openshift-console/console-6b4bd59897-k2pk2" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.695534 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/d9468cb2-7324-496f-a1a2-a88228cf462d-console-serving-cert\") pod \"console-6b4bd59897-k2pk2\" (UID: \"d9468cb2-7324-496f-a1a2-a88228cf462d\") " pod="openshift-console/console-6b4bd59897-k2pk2" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.703997 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qj8zm\" (UniqueName: \"kubernetes.io/projected/d9468cb2-7324-496f-a1a2-a88228cf462d-kube-api-access-qj8zm\") pod \"console-6b4bd59897-k2pk2\" (UID: \"d9468cb2-7324-496f-a1a2-a88228cf462d\") " pod="openshift-console/console-6b4bd59897-k2pk2" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.720459 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-p7n5j"] Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.785995 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/45825b15-9674-46b3-b29e-7d78c4de3274-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-pthql\" (UID: \"45825b15-9674-46b3-b29e-7d78c4de3274\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-pthql" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.790282 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/45825b15-9674-46b3-b29e-7d78c4de3274-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-pthql\" (UID: \"45825b15-9674-46b3-b29e-7d78c4de3274\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-pthql" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.829248 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-6b4bd59897-k2pk2" Nov 21 19:13:18 crc kubenswrapper[4701]: I1121 19:13:18.845849 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-mxxhh"] Nov 21 19:13:18 crc kubenswrapper[4701]: W1121 19:13:18.852186 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcc6e0ff9_3b8f_403f_9f52_52808c29059d.slice/crio-8aaafa7543d8bf59624e4ab06781bf5ffab7711c304d247cf5cc448310ea6070 WatchSource:0}: Error finding container 8aaafa7543d8bf59624e4ab06781bf5ffab7711c304d247cf5cc448310ea6070: Status 404 returned error can't find the container with id 8aaafa7543d8bf59624e4ab06781bf5ffab7711c304d247cf5cc448310ea6070 Nov 21 19:13:19 crc kubenswrapper[4701]: I1121 19:13:19.052340 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-6b4bd59897-k2pk2"] Nov 21 19:13:19 crc kubenswrapper[4701]: I1121 19:13:19.078842 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-pthql" Nov 21 19:13:20 crc kubenswrapper[4701]: I1121 19:13:20.344754 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-p7n5j" event={"ID":"51b03b0f-062b-45d4-95b9-f965e2b69d80","Type":"ContainerStarted","Data":"8c8082537d28cb3ba77d1a5f40f46546adc616033341073eac7b0aa34c2e3876"} Nov 21 19:13:20 crc kubenswrapper[4701]: I1121 19:13:20.347082 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-mxxhh" event={"ID":"cc6e0ff9-3b8f-403f-9f52-52808c29059d","Type":"ContainerStarted","Data":"8aaafa7543d8bf59624e4ab06781bf5ffab7711c304d247cf5cc448310ea6070"} Nov 21 19:13:20 crc kubenswrapper[4701]: I1121 19:13:20.348313 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-7sdtf" event={"ID":"ff85cf8f-c850-4455-92b8-c7bc1c548e68","Type":"ContainerStarted","Data":"a853aaba04508d21c260bef89341be7a02a70321695a7437778805ffd0acf6ef"} Nov 21 19:13:20 crc kubenswrapper[4701]: I1121 19:13:20.444133 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-pthql"] Nov 21 19:13:20 crc kubenswrapper[4701]: W1121 19:13:20.456741 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod45825b15_9674_46b3_b29e_7d78c4de3274.slice/crio-164d11771791138ccfbd6d84962b2515bc3951fe13b8a5bdd0154a113f7321c8 WatchSource:0}: Error finding container 164d11771791138ccfbd6d84962b2515bc3951fe13b8a5bdd0154a113f7321c8: Status 404 returned error can't find the container with id 164d11771791138ccfbd6d84962b2515bc3951fe13b8a5bdd0154a113f7321c8 Nov 21 19:13:21 crc kubenswrapper[4701]: I1121 19:13:21.359031 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6b4bd59897-k2pk2" event={"ID":"d9468cb2-7324-496f-a1a2-a88228cf462d","Type":"ContainerStarted","Data":"0f18ecd40db7b4ecd1ebd7d8ea4dc963159f703897e97412d0e339ce52eb614b"} Nov 21 19:13:21 crc kubenswrapper[4701]: I1121 19:13:21.360016 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6b4bd59897-k2pk2" event={"ID":"d9468cb2-7324-496f-a1a2-a88228cf462d","Type":"ContainerStarted","Data":"e71b6ac323ae23cfe1048be796527f9b9fcb6cbaa281c293c0ffb9ba1d77cf31"} Nov 21 19:13:21 crc kubenswrapper[4701]: I1121 19:13:21.360746 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-pthql" event={"ID":"45825b15-9674-46b3-b29e-7d78c4de3274","Type":"ContainerStarted","Data":"164d11771791138ccfbd6d84962b2515bc3951fe13b8a5bdd0154a113f7321c8"} Nov 21 19:13:21 crc kubenswrapper[4701]: I1121 19:13:21.385087 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-6b4bd59897-k2pk2" podStartSLOduration=3.385069745 podStartE2EDuration="3.385069745s" podCreationTimestamp="2025-11-21 19:13:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:13:21.382182447 +0000 UTC m=+692.167322474" watchObservedRunningTime="2025-11-21 19:13:21.385069745 +0000 UTC m=+692.170209772" Nov 21 19:13:23 crc kubenswrapper[4701]: I1121 19:13:23.379930 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-pthql" 
event={"ID":"45825b15-9674-46b3-b29e-7d78c4de3274","Type":"ContainerStarted","Data":"a61de2eb28123cbd5e894bb362ce3b2ca1c56b65bf3401002746b3888bc5a7c8"} Nov 21 19:13:23 crc kubenswrapper[4701]: I1121 19:13:23.380812 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-pthql" Nov 21 19:13:23 crc kubenswrapper[4701]: I1121 19:13:23.383285 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-p7n5j" event={"ID":"51b03b0f-062b-45d4-95b9-f965e2b69d80","Type":"ContainerStarted","Data":"3285d6d11ef3b2b019c441a0833953ec737b079f730c21b3a1c25b81dc957f33"} Nov 21 19:13:23 crc kubenswrapper[4701]: I1121 19:13:23.385356 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-mxxhh" event={"ID":"cc6e0ff9-3b8f-403f-9f52-52808c29059d","Type":"ContainerStarted","Data":"3808003e615f16317673211d0ad511f266ea2186f33a72a921fe1f353bc05a63"} Nov 21 19:13:23 crc kubenswrapper[4701]: I1121 19:13:23.387309 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-7sdtf" event={"ID":"ff85cf8f-c850-4455-92b8-c7bc1c548e68","Type":"ContainerStarted","Data":"570f7f063e708cb1582ff5051567c141f5af75a8af5e07dc2afd99d43810280f"} Nov 21 19:13:23 crc kubenswrapper[4701]: I1121 19:13:23.387674 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-7sdtf" Nov 21 19:13:23 crc kubenswrapper[4701]: I1121 19:13:23.409334 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-pthql" podStartSLOduration=3.062759215 podStartE2EDuration="5.409315274s" podCreationTimestamp="2025-11-21 19:13:18 +0000 UTC" firstStartedPulling="2025-11-21 19:13:20.459427345 +0000 UTC m=+691.244567372" lastFinishedPulling="2025-11-21 19:13:22.805983404 +0000 UTC m=+693.591123431" observedRunningTime="2025-11-21 19:13:23.405791169 +0000 UTC m=+694.190931196" watchObservedRunningTime="2025-11-21 19:13:23.409315274 +0000 UTC m=+694.194455301" Nov 21 19:13:23 crc kubenswrapper[4701]: I1121 19:13:23.443807 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-7sdtf" podStartSLOduration=1.138058045 podStartE2EDuration="5.443761179s" podCreationTimestamp="2025-11-21 19:13:18 +0000 UTC" firstStartedPulling="2025-11-21 19:13:18.540692847 +0000 UTC m=+689.325832874" lastFinishedPulling="2025-11-21 19:13:22.846395971 +0000 UTC m=+693.631536008" observedRunningTime="2025-11-21 19:13:23.441694462 +0000 UTC m=+694.226834489" watchObservedRunningTime="2025-11-21 19:13:23.443761179 +0000 UTC m=+694.228901246" Nov 21 19:13:23 crc kubenswrapper[4701]: I1121 19:13:23.469830 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-mxxhh" podStartSLOduration=1.517255738 podStartE2EDuration="5.469806245s" podCreationTimestamp="2025-11-21 19:13:18 +0000 UTC" firstStartedPulling="2025-11-21 19:13:18.854465985 +0000 UTC m=+689.639606002" lastFinishedPulling="2025-11-21 19:13:22.807016492 +0000 UTC m=+693.592156509" observedRunningTime="2025-11-21 19:13:23.463076453 +0000 UTC m=+694.248216490" watchObservedRunningTime="2025-11-21 19:13:23.469806245 +0000 UTC m=+694.254946282" Nov 21 19:13:25 crc kubenswrapper[4701]: I1121 19:13:25.402241 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-p7n5j" event={"ID":"51b03b0f-062b-45d4-95b9-f965e2b69d80","Type":"ContainerStarted","Data":"99f2d88618c40d9ee49da491f3fb1d0d568f2b6b6527255b44ab9502893cf64a"} Nov 21 19:13:25 crc kubenswrapper[4701]: I1121 19:13:25.424722 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-p7n5j" podStartSLOduration=0.957479408 podStartE2EDuration="7.424700064s" podCreationTimestamp="2025-11-21 19:13:18 +0000 UTC" firstStartedPulling="2025-11-21 19:13:18.732655892 +0000 UTC m=+689.517795929" lastFinishedPulling="2025-11-21 19:13:25.199876548 +0000 UTC m=+695.985016585" observedRunningTime="2025-11-21 19:13:25.419109032 +0000 UTC m=+696.204249069" watchObservedRunningTime="2025-11-21 19:13:25.424700064 +0000 UTC m=+696.209840101" Nov 21 19:13:28 crc kubenswrapper[4701]: I1121 19:13:28.542408 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-7sdtf" Nov 21 19:13:28 crc kubenswrapper[4701]: I1121 19:13:28.829837 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-6b4bd59897-k2pk2" Nov 21 19:13:28 crc kubenswrapper[4701]: I1121 19:13:28.829913 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-6b4bd59897-k2pk2" Nov 21 19:13:28 crc kubenswrapper[4701]: I1121 19:13:28.837870 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-6b4bd59897-k2pk2" Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.082150 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-gdj2w"] Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.082977 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-gdj2w" podUID="4b8139c8-66be-4f40-a084-aa26d58554bb" containerName="controller-manager" containerID="cri-o://3b06c27be1e9c7873e2125a6f907c1d7aab513a28b4bffb8268ae416e3fbfd52" gracePeriod=30 Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.219648 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-75d7z"] Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.219993 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-75d7z" podUID="4167d110-2211-4862-af3d-b6b4a88a0bfd" containerName="route-controller-manager" containerID="cri-o://7c213ac2c74f637d0acca04440145ae3e7f1ff8c6d8d87f1b60f05941b0b3e02" gracePeriod=30 Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.434927 4701 generic.go:334] "Generic (PLEG): container finished" podID="4b8139c8-66be-4f40-a084-aa26d58554bb" containerID="3b06c27be1e9c7873e2125a6f907c1d7aab513a28b4bffb8268ae416e3fbfd52" exitCode=0 Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.435009 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-gdj2w" event={"ID":"4b8139c8-66be-4f40-a084-aa26d58554bb","Type":"ContainerDied","Data":"3b06c27be1e9c7873e2125a6f907c1d7aab513a28b4bffb8268ae416e3fbfd52"} Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.436723 4701 generic.go:334] "Generic (PLEG): container finished" podID="4167d110-2211-4862-af3d-b6b4a88a0bfd" 
containerID="7c213ac2c74f637d0acca04440145ae3e7f1ff8c6d8d87f1b60f05941b0b3e02" exitCode=0 Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.436819 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-75d7z" event={"ID":"4167d110-2211-4862-af3d-b6b4a88a0bfd","Type":"ContainerDied","Data":"7c213ac2c74f637d0acca04440145ae3e7f1ff8c6d8d87f1b60f05941b0b3e02"} Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.453645 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-6b4bd59897-k2pk2" Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.518800 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-cwddx"] Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.677913 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-gdj2w" Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.736648 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-75d7z" Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.761586 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4b8139c8-66be-4f40-a084-aa26d58554bb-client-ca\") pod \"4b8139c8-66be-4f40-a084-aa26d58554bb\" (UID: \"4b8139c8-66be-4f40-a084-aa26d58554bb\") " Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.761716 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jrbj5\" (UniqueName: \"kubernetes.io/projected/4b8139c8-66be-4f40-a084-aa26d58554bb-kube-api-access-jrbj5\") pod \"4b8139c8-66be-4f40-a084-aa26d58554bb\" (UID: \"4b8139c8-66be-4f40-a084-aa26d58554bb\") " Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.761844 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b8139c8-66be-4f40-a084-aa26d58554bb-config\") pod \"4b8139c8-66be-4f40-a084-aa26d58554bb\" (UID: \"4b8139c8-66be-4f40-a084-aa26d58554bb\") " Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.761873 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4b8139c8-66be-4f40-a084-aa26d58554bb-proxy-ca-bundles\") pod \"4b8139c8-66be-4f40-a084-aa26d58554bb\" (UID: \"4b8139c8-66be-4f40-a084-aa26d58554bb\") " Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.761956 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4b8139c8-66be-4f40-a084-aa26d58554bb-serving-cert\") pod \"4b8139c8-66be-4f40-a084-aa26d58554bb\" (UID: \"4b8139c8-66be-4f40-a084-aa26d58554bb\") " Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.762744 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b8139c8-66be-4f40-a084-aa26d58554bb-client-ca" (OuterVolumeSpecName: "client-ca") pod "4b8139c8-66be-4f40-a084-aa26d58554bb" (UID: "4b8139c8-66be-4f40-a084-aa26d58554bb"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.762957 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b8139c8-66be-4f40-a084-aa26d58554bb-config" (OuterVolumeSpecName: "config") pod "4b8139c8-66be-4f40-a084-aa26d58554bb" (UID: "4b8139c8-66be-4f40-a084-aa26d58554bb"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.763122 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b8139c8-66be-4f40-a084-aa26d58554bb-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "4b8139c8-66be-4f40-a084-aa26d58554bb" (UID: "4b8139c8-66be-4f40-a084-aa26d58554bb"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.772320 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b8139c8-66be-4f40-a084-aa26d58554bb-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "4b8139c8-66be-4f40-a084-aa26d58554bb" (UID: "4b8139c8-66be-4f40-a084-aa26d58554bb"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.777472 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b8139c8-66be-4f40-a084-aa26d58554bb-kube-api-access-jrbj5" (OuterVolumeSpecName: "kube-api-access-jrbj5") pod "4b8139c8-66be-4f40-a084-aa26d58554bb" (UID: "4b8139c8-66be-4f40-a084-aa26d58554bb"). InnerVolumeSpecName "kube-api-access-jrbj5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.861004 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-7994947d46-4kc5p"] Nov 21 19:13:29 crc kubenswrapper[4701]: E1121 19:13:29.861268 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4167d110-2211-4862-af3d-b6b4a88a0bfd" containerName="route-controller-manager" Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.861282 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="4167d110-2211-4862-af3d-b6b4a88a0bfd" containerName="route-controller-manager" Nov 21 19:13:29 crc kubenswrapper[4701]: E1121 19:13:29.861295 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b8139c8-66be-4f40-a084-aa26d58554bb" containerName="controller-manager" Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.861303 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b8139c8-66be-4f40-a084-aa26d58554bb" containerName="controller-manager" Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.861401 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b8139c8-66be-4f40-a084-aa26d58554bb" containerName="controller-manager" Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.861412 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="4167d110-2211-4862-af3d-b6b4a88a0bfd" containerName="route-controller-manager" Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.861779 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-7994947d46-4kc5p" Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.862949 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4167d110-2211-4862-af3d-b6b4a88a0bfd-client-ca\") pod \"4167d110-2211-4862-af3d-b6b4a88a0bfd\" (UID: \"4167d110-2211-4862-af3d-b6b4a88a0bfd\") " Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.862984 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4l2xf\" (UniqueName: \"kubernetes.io/projected/4167d110-2211-4862-af3d-b6b4a88a0bfd-kube-api-access-4l2xf\") pod \"4167d110-2211-4862-af3d-b6b4a88a0bfd\" (UID: \"4167d110-2211-4862-af3d-b6b4a88a0bfd\") " Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.863153 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4167d110-2211-4862-af3d-b6b4a88a0bfd-config\") pod \"4167d110-2211-4862-af3d-b6b4a88a0bfd\" (UID: \"4167d110-2211-4862-af3d-b6b4a88a0bfd\") " Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.863235 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4167d110-2211-4862-af3d-b6b4a88a0bfd-serving-cert\") pod \"4167d110-2211-4862-af3d-b6b4a88a0bfd\" (UID: \"4167d110-2211-4862-af3d-b6b4a88a0bfd\") " Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.863500 4701 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b8139c8-66be-4f40-a084-aa26d58554bb-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.863521 4701 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4b8139c8-66be-4f40-a084-aa26d58554bb-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.863534 4701 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4b8139c8-66be-4f40-a084-aa26d58554bb-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.863545 4701 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4b8139c8-66be-4f40-a084-aa26d58554bb-client-ca\") on node \"crc\" DevicePath \"\"" Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.863555 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jrbj5\" (UniqueName: \"kubernetes.io/projected/4b8139c8-66be-4f40-a084-aa26d58554bb-kube-api-access-jrbj5\") on node \"crc\" DevicePath \"\"" Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.864229 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4167d110-2211-4862-af3d-b6b4a88a0bfd-client-ca" (OuterVolumeSpecName: "client-ca") pod "4167d110-2211-4862-af3d-b6b4a88a0bfd" (UID: "4167d110-2211-4862-af3d-b6b4a88a0bfd"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.864319 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4167d110-2211-4862-af3d-b6b4a88a0bfd-config" (OuterVolumeSpecName: "config") pod "4167d110-2211-4862-af3d-b6b4a88a0bfd" (UID: "4167d110-2211-4862-af3d-b6b4a88a0bfd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.868643 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4167d110-2211-4862-af3d-b6b4a88a0bfd-kube-api-access-4l2xf" (OuterVolumeSpecName: "kube-api-access-4l2xf") pod "4167d110-2211-4862-af3d-b6b4a88a0bfd" (UID: "4167d110-2211-4862-af3d-b6b4a88a0bfd"). InnerVolumeSpecName "kube-api-access-4l2xf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.873795 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4167d110-2211-4862-af3d-b6b4a88a0bfd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "4167d110-2211-4862-af3d-b6b4a88a0bfd" (UID: "4167d110-2211-4862-af3d-b6b4a88a0bfd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.879106 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-696859d587-kh6bb"] Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.895125 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-696859d587-kh6bb" Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.896017 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7994947d46-4kc5p"] Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.914244 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-696859d587-kh6bb"] Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.965312 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a10fea6b-7759-4f55-8ae5-70f867e77b9a-proxy-ca-bundles\") pod \"controller-manager-7994947d46-4kc5p\" (UID: \"a10fea6b-7759-4f55-8ae5-70f867e77b9a\") " pod="openshift-controller-manager/controller-manager-7994947d46-4kc5p" Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.965379 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a10fea6b-7759-4f55-8ae5-70f867e77b9a-config\") pod \"controller-manager-7994947d46-4kc5p\" (UID: \"a10fea6b-7759-4f55-8ae5-70f867e77b9a\") " pod="openshift-controller-manager/controller-manager-7994947d46-4kc5p" Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.965415 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5c4hg\" (UniqueName: \"kubernetes.io/projected/08c08ab9-30fe-48f8-8763-85a30b6b0c6d-kube-api-access-5c4hg\") pod \"route-controller-manager-696859d587-kh6bb\" (UID: \"08c08ab9-30fe-48f8-8763-85a30b6b0c6d\") " pod="openshift-route-controller-manager/route-controller-manager-696859d587-kh6bb" Nov 21 19:13:29 crc 
kubenswrapper[4701]: I1121 19:13:29.965465 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/08c08ab9-30fe-48f8-8763-85a30b6b0c6d-client-ca\") pod \"route-controller-manager-696859d587-kh6bb\" (UID: \"08c08ab9-30fe-48f8-8763-85a30b6b0c6d\") " pod="openshift-route-controller-manager/route-controller-manager-696859d587-kh6bb" Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.965498 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2lnnq\" (UniqueName: \"kubernetes.io/projected/a10fea6b-7759-4f55-8ae5-70f867e77b9a-kube-api-access-2lnnq\") pod \"controller-manager-7994947d46-4kc5p\" (UID: \"a10fea6b-7759-4f55-8ae5-70f867e77b9a\") " pod="openshift-controller-manager/controller-manager-7994947d46-4kc5p" Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.965533 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a10fea6b-7759-4f55-8ae5-70f867e77b9a-client-ca\") pod \"controller-manager-7994947d46-4kc5p\" (UID: \"a10fea6b-7759-4f55-8ae5-70f867e77b9a\") " pod="openshift-controller-manager/controller-manager-7994947d46-4kc5p" Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.965559 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a10fea6b-7759-4f55-8ae5-70f867e77b9a-serving-cert\") pod \"controller-manager-7994947d46-4kc5p\" (UID: \"a10fea6b-7759-4f55-8ae5-70f867e77b9a\") " pod="openshift-controller-manager/controller-manager-7994947d46-4kc5p" Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.965588 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/08c08ab9-30fe-48f8-8763-85a30b6b0c6d-config\") pod \"route-controller-manager-696859d587-kh6bb\" (UID: \"08c08ab9-30fe-48f8-8763-85a30b6b0c6d\") " pod="openshift-route-controller-manager/route-controller-manager-696859d587-kh6bb" Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.965617 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/08c08ab9-30fe-48f8-8763-85a30b6b0c6d-serving-cert\") pod \"route-controller-manager-696859d587-kh6bb\" (UID: \"08c08ab9-30fe-48f8-8763-85a30b6b0c6d\") " pod="openshift-route-controller-manager/route-controller-manager-696859d587-kh6bb" Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.965683 4701 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4167d110-2211-4862-af3d-b6b4a88a0bfd-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.965699 4701 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4167d110-2211-4862-af3d-b6b4a88a0bfd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.965714 4701 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4167d110-2211-4862-af3d-b6b4a88a0bfd-client-ca\") on node \"crc\" DevicePath \"\"" Nov 21 19:13:29 crc kubenswrapper[4701]: I1121 19:13:29.965726 4701 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-4l2xf\" (UniqueName: \"kubernetes.io/projected/4167d110-2211-4862-af3d-b6b4a88a0bfd-kube-api-access-4l2xf\") on node \"crc\" DevicePath \"\"" Nov 21 19:13:30 crc kubenswrapper[4701]: I1121 19:13:30.067359 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a10fea6b-7759-4f55-8ae5-70f867e77b9a-client-ca\") pod \"controller-manager-7994947d46-4kc5p\" (UID: \"a10fea6b-7759-4f55-8ae5-70f867e77b9a\") " pod="openshift-controller-manager/controller-manager-7994947d46-4kc5p" Nov 21 19:13:30 crc kubenswrapper[4701]: I1121 19:13:30.067424 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a10fea6b-7759-4f55-8ae5-70f867e77b9a-serving-cert\") pod \"controller-manager-7994947d46-4kc5p\" (UID: \"a10fea6b-7759-4f55-8ae5-70f867e77b9a\") " pod="openshift-controller-manager/controller-manager-7994947d46-4kc5p" Nov 21 19:13:30 crc kubenswrapper[4701]: I1121 19:13:30.067458 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/08c08ab9-30fe-48f8-8763-85a30b6b0c6d-config\") pod \"route-controller-manager-696859d587-kh6bb\" (UID: \"08c08ab9-30fe-48f8-8763-85a30b6b0c6d\") " pod="openshift-route-controller-manager/route-controller-manager-696859d587-kh6bb" Nov 21 19:13:30 crc kubenswrapper[4701]: I1121 19:13:30.067500 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/08c08ab9-30fe-48f8-8763-85a30b6b0c6d-serving-cert\") pod \"route-controller-manager-696859d587-kh6bb\" (UID: \"08c08ab9-30fe-48f8-8763-85a30b6b0c6d\") " pod="openshift-route-controller-manager/route-controller-manager-696859d587-kh6bb" Nov 21 19:13:30 crc kubenswrapper[4701]: I1121 19:13:30.067588 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a10fea6b-7759-4f55-8ae5-70f867e77b9a-proxy-ca-bundles\") pod \"controller-manager-7994947d46-4kc5p\" (UID: \"a10fea6b-7759-4f55-8ae5-70f867e77b9a\") " pod="openshift-controller-manager/controller-manager-7994947d46-4kc5p" Nov 21 19:13:30 crc kubenswrapper[4701]: I1121 19:13:30.067624 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a10fea6b-7759-4f55-8ae5-70f867e77b9a-config\") pod \"controller-manager-7994947d46-4kc5p\" (UID: \"a10fea6b-7759-4f55-8ae5-70f867e77b9a\") " pod="openshift-controller-manager/controller-manager-7994947d46-4kc5p" Nov 21 19:13:30 crc kubenswrapper[4701]: I1121 19:13:30.067650 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5c4hg\" (UniqueName: \"kubernetes.io/projected/08c08ab9-30fe-48f8-8763-85a30b6b0c6d-kube-api-access-5c4hg\") pod \"route-controller-manager-696859d587-kh6bb\" (UID: \"08c08ab9-30fe-48f8-8763-85a30b6b0c6d\") " pod="openshift-route-controller-manager/route-controller-manager-696859d587-kh6bb" Nov 21 19:13:30 crc kubenswrapper[4701]: I1121 19:13:30.067708 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/08c08ab9-30fe-48f8-8763-85a30b6b0c6d-client-ca\") pod \"route-controller-manager-696859d587-kh6bb\" (UID: \"08c08ab9-30fe-48f8-8763-85a30b6b0c6d\") " 
pod="openshift-route-controller-manager/route-controller-manager-696859d587-kh6bb" Nov 21 19:13:30 crc kubenswrapper[4701]: I1121 19:13:30.067739 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2lnnq\" (UniqueName: \"kubernetes.io/projected/a10fea6b-7759-4f55-8ae5-70f867e77b9a-kube-api-access-2lnnq\") pod \"controller-manager-7994947d46-4kc5p\" (UID: \"a10fea6b-7759-4f55-8ae5-70f867e77b9a\") " pod="openshift-controller-manager/controller-manager-7994947d46-4kc5p" Nov 21 19:13:30 crc kubenswrapper[4701]: I1121 19:13:30.069570 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a10fea6b-7759-4f55-8ae5-70f867e77b9a-proxy-ca-bundles\") pod \"controller-manager-7994947d46-4kc5p\" (UID: \"a10fea6b-7759-4f55-8ae5-70f867e77b9a\") " pod="openshift-controller-manager/controller-manager-7994947d46-4kc5p" Nov 21 19:13:30 crc kubenswrapper[4701]: I1121 19:13:30.069602 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a10fea6b-7759-4f55-8ae5-70f867e77b9a-client-ca\") pod \"controller-manager-7994947d46-4kc5p\" (UID: \"a10fea6b-7759-4f55-8ae5-70f867e77b9a\") " pod="openshift-controller-manager/controller-manager-7994947d46-4kc5p" Nov 21 19:13:30 crc kubenswrapper[4701]: I1121 19:13:30.070420 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/08c08ab9-30fe-48f8-8763-85a30b6b0c6d-client-ca\") pod \"route-controller-manager-696859d587-kh6bb\" (UID: \"08c08ab9-30fe-48f8-8763-85a30b6b0c6d\") " pod="openshift-route-controller-manager/route-controller-manager-696859d587-kh6bb" Nov 21 19:13:30 crc kubenswrapper[4701]: I1121 19:13:30.071488 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a10fea6b-7759-4f55-8ae5-70f867e77b9a-config\") pod \"controller-manager-7994947d46-4kc5p\" (UID: \"a10fea6b-7759-4f55-8ae5-70f867e77b9a\") " pod="openshift-controller-manager/controller-manager-7994947d46-4kc5p" Nov 21 19:13:30 crc kubenswrapper[4701]: I1121 19:13:30.073394 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/08c08ab9-30fe-48f8-8763-85a30b6b0c6d-config\") pod \"route-controller-manager-696859d587-kh6bb\" (UID: \"08c08ab9-30fe-48f8-8763-85a30b6b0c6d\") " pod="openshift-route-controller-manager/route-controller-manager-696859d587-kh6bb" Nov 21 19:13:30 crc kubenswrapper[4701]: I1121 19:13:30.076383 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a10fea6b-7759-4f55-8ae5-70f867e77b9a-serving-cert\") pod \"controller-manager-7994947d46-4kc5p\" (UID: \"a10fea6b-7759-4f55-8ae5-70f867e77b9a\") " pod="openshift-controller-manager/controller-manager-7994947d46-4kc5p" Nov 21 19:13:30 crc kubenswrapper[4701]: I1121 19:13:30.076904 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/08c08ab9-30fe-48f8-8763-85a30b6b0c6d-serving-cert\") pod \"route-controller-manager-696859d587-kh6bb\" (UID: \"08c08ab9-30fe-48f8-8763-85a30b6b0c6d\") " pod="openshift-route-controller-manager/route-controller-manager-696859d587-kh6bb" Nov 21 19:13:30 crc kubenswrapper[4701]: I1121 19:13:30.093721 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-2lnnq\" (UniqueName: \"kubernetes.io/projected/a10fea6b-7759-4f55-8ae5-70f867e77b9a-kube-api-access-2lnnq\") pod \"controller-manager-7994947d46-4kc5p\" (UID: \"a10fea6b-7759-4f55-8ae5-70f867e77b9a\") " pod="openshift-controller-manager/controller-manager-7994947d46-4kc5p" Nov 21 19:13:30 crc kubenswrapper[4701]: I1121 19:13:30.096119 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5c4hg\" (UniqueName: \"kubernetes.io/projected/08c08ab9-30fe-48f8-8763-85a30b6b0c6d-kube-api-access-5c4hg\") pod \"route-controller-manager-696859d587-kh6bb\" (UID: \"08c08ab9-30fe-48f8-8763-85a30b6b0c6d\") " pod="openshift-route-controller-manager/route-controller-manager-696859d587-kh6bb" Nov 21 19:13:30 crc kubenswrapper[4701]: I1121 19:13:30.177903 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7994947d46-4kc5p" Nov 21 19:13:30 crc kubenswrapper[4701]: I1121 19:13:30.217524 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-696859d587-kh6bb" Nov 21 19:13:30 crc kubenswrapper[4701]: I1121 19:13:30.452580 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-75d7z" event={"ID":"4167d110-2211-4862-af3d-b6b4a88a0bfd","Type":"ContainerDied","Data":"f27f6da2a8d5df7efa9f0b995359cca551c540a1413af003af00ba0242284abc"} Nov 21 19:13:30 crc kubenswrapper[4701]: I1121 19:13:30.452660 4701 scope.go:117] "RemoveContainer" containerID="7c213ac2c74f637d0acca04440145ae3e7f1ff8c6d8d87f1b60f05941b0b3e02" Nov 21 19:13:30 crc kubenswrapper[4701]: I1121 19:13:30.452760 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-75d7z" Nov 21 19:13:30 crc kubenswrapper[4701]: I1121 19:13:30.465140 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-gdj2w" Nov 21 19:13:30 crc kubenswrapper[4701]: I1121 19:13:30.465948 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-gdj2w" event={"ID":"4b8139c8-66be-4f40-a084-aa26d58554bb","Type":"ContainerDied","Data":"5094b19c6891b6b5b4d7d8e4870ec929956b7a7bad86e95e2aaf66c698002301"} Nov 21 19:13:30 crc kubenswrapper[4701]: I1121 19:13:30.480805 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-75d7z"] Nov 21 19:13:30 crc kubenswrapper[4701]: I1121 19:13:30.488789 4701 scope.go:117] "RemoveContainer" containerID="3b06c27be1e9c7873e2125a6f907c1d7aab513a28b4bffb8268ae416e3fbfd52" Nov 21 19:13:30 crc kubenswrapper[4701]: I1121 19:13:30.489730 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-75d7z"] Nov 21 19:13:30 crc kubenswrapper[4701]: I1121 19:13:30.500328 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7994947d46-4kc5p"] Nov 21 19:13:30 crc kubenswrapper[4701]: I1121 19:13:30.508189 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-gdj2w"] Nov 21 19:13:30 crc kubenswrapper[4701]: I1121 19:13:30.508262 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-gdj2w"] Nov 21 19:13:30 crc kubenswrapper[4701]: I1121 19:13:30.574329 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-696859d587-kh6bb"] Nov 21 19:13:31 crc kubenswrapper[4701]: I1121 19:13:31.475702 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7994947d46-4kc5p" event={"ID":"a10fea6b-7759-4f55-8ae5-70f867e77b9a","Type":"ContainerStarted","Data":"236c5edd71ba088a51d499c03ed02974f2c6190fb7d806b6654b3be1062e7a7f"} Nov 21 19:13:31 crc kubenswrapper[4701]: I1121 19:13:31.476348 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7994947d46-4kc5p" event={"ID":"a10fea6b-7759-4f55-8ae5-70f867e77b9a","Type":"ContainerStarted","Data":"3738e2c3d75b2705e9bfd73d8b4e0fcb1482e648f521975d0fbceeccc93ecd30"} Nov 21 19:13:31 crc kubenswrapper[4701]: I1121 19:13:31.476385 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-7994947d46-4kc5p" Nov 21 19:13:31 crc kubenswrapper[4701]: I1121 19:13:31.486312 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-696859d587-kh6bb" event={"ID":"08c08ab9-30fe-48f8-8763-85a30b6b0c6d","Type":"ContainerStarted","Data":"75771ea381f0a03c2f2234a4872eb8e7b3431f47aa833fb71d14e7c2a2aa194b"} Nov 21 19:13:31 crc kubenswrapper[4701]: I1121 19:13:31.486446 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-696859d587-kh6bb" event={"ID":"08c08ab9-30fe-48f8-8763-85a30b6b0c6d","Type":"ContainerStarted","Data":"8e101ecd28bb70aa999c95f59a3b33530ac3ce0ee1359cf64b90b0783ce5a79d"} Nov 21 19:13:31 crc kubenswrapper[4701]: I1121 19:13:31.486674 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-route-controller-manager/route-controller-manager-696859d587-kh6bb" Nov 21 19:13:31 crc kubenswrapper[4701]: I1121 19:13:31.491293 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-7994947d46-4kc5p" Nov 21 19:13:31 crc kubenswrapper[4701]: I1121 19:13:31.496735 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-696859d587-kh6bb" Nov 21 19:13:31 crc kubenswrapper[4701]: I1121 19:13:31.504822 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-7994947d46-4kc5p" podStartSLOduration=2.504797141 podStartE2EDuration="2.504797141s" podCreationTimestamp="2025-11-21 19:13:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:13:31.497483063 +0000 UTC m=+702.282623090" watchObservedRunningTime="2025-11-21 19:13:31.504797141 +0000 UTC m=+702.289937208" Nov 21 19:13:31 crc kubenswrapper[4701]: I1121 19:13:31.526795 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-696859d587-kh6bb" podStartSLOduration=2.526765567 podStartE2EDuration="2.526765567s" podCreationTimestamp="2025-11-21 19:13:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:13:31.523486258 +0000 UTC m=+702.308626325" watchObservedRunningTime="2025-11-21 19:13:31.526765567 +0000 UTC m=+702.311905624" Nov 21 19:13:31 crc kubenswrapper[4701]: I1121 19:13:31.981323 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4167d110-2211-4862-af3d-b6b4a88a0bfd" path="/var/lib/kubelet/pods/4167d110-2211-4862-af3d-b6b4a88a0bfd/volumes" Nov 21 19:13:31 crc kubenswrapper[4701]: I1121 19:13:31.982552 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b8139c8-66be-4f40-a084-aa26d58554bb" path="/var/lib/kubelet/pods/4b8139c8-66be-4f40-a084-aa26d58554bb/volumes" Nov 21 19:13:36 crc kubenswrapper[4701]: I1121 19:13:36.025107 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-zq7bf"] Nov 21 19:13:36 crc kubenswrapper[4701]: I1121 19:13:36.027183 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zq7bf" Nov 21 19:13:36 crc kubenswrapper[4701]: I1121 19:13:36.042857 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zq7bf"] Nov 21 19:13:36 crc kubenswrapper[4701]: I1121 19:13:36.074779 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4cd49166-63d9-411e-aef7-6e5f196d78f0-catalog-content\") pod \"redhat-marketplace-zq7bf\" (UID: \"4cd49166-63d9-411e-aef7-6e5f196d78f0\") " pod="openshift-marketplace/redhat-marketplace-zq7bf" Nov 21 19:13:36 crc kubenswrapper[4701]: I1121 19:13:36.074865 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qpwq7\" (UniqueName: \"kubernetes.io/projected/4cd49166-63d9-411e-aef7-6e5f196d78f0-kube-api-access-qpwq7\") pod \"redhat-marketplace-zq7bf\" (UID: \"4cd49166-63d9-411e-aef7-6e5f196d78f0\") " pod="openshift-marketplace/redhat-marketplace-zq7bf" Nov 21 19:13:36 crc kubenswrapper[4701]: I1121 19:13:36.074904 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4cd49166-63d9-411e-aef7-6e5f196d78f0-utilities\") pod \"redhat-marketplace-zq7bf\" (UID: \"4cd49166-63d9-411e-aef7-6e5f196d78f0\") " pod="openshift-marketplace/redhat-marketplace-zq7bf" Nov 21 19:13:36 crc kubenswrapper[4701]: I1121 19:13:36.077966 4701 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 21 19:13:36 crc kubenswrapper[4701]: I1121 19:13:36.176619 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4cd49166-63d9-411e-aef7-6e5f196d78f0-catalog-content\") pod \"redhat-marketplace-zq7bf\" (UID: \"4cd49166-63d9-411e-aef7-6e5f196d78f0\") " pod="openshift-marketplace/redhat-marketplace-zq7bf" Nov 21 19:13:36 crc kubenswrapper[4701]: I1121 19:13:36.176674 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qpwq7\" (UniqueName: \"kubernetes.io/projected/4cd49166-63d9-411e-aef7-6e5f196d78f0-kube-api-access-qpwq7\") pod \"redhat-marketplace-zq7bf\" (UID: \"4cd49166-63d9-411e-aef7-6e5f196d78f0\") " pod="openshift-marketplace/redhat-marketplace-zq7bf" Nov 21 19:13:36 crc kubenswrapper[4701]: I1121 19:13:36.176701 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4cd49166-63d9-411e-aef7-6e5f196d78f0-utilities\") pod \"redhat-marketplace-zq7bf\" (UID: \"4cd49166-63d9-411e-aef7-6e5f196d78f0\") " pod="openshift-marketplace/redhat-marketplace-zq7bf" Nov 21 19:13:36 crc kubenswrapper[4701]: I1121 19:13:36.177186 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4cd49166-63d9-411e-aef7-6e5f196d78f0-catalog-content\") pod \"redhat-marketplace-zq7bf\" (UID: \"4cd49166-63d9-411e-aef7-6e5f196d78f0\") " pod="openshift-marketplace/redhat-marketplace-zq7bf" Nov 21 19:13:36 crc kubenswrapper[4701]: I1121 19:13:36.177203 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4cd49166-63d9-411e-aef7-6e5f196d78f0-utilities\") pod \"redhat-marketplace-zq7bf\" (UID: 
\"4cd49166-63d9-411e-aef7-6e5f196d78f0\") " pod="openshift-marketplace/redhat-marketplace-zq7bf" Nov 21 19:13:36 crc kubenswrapper[4701]: I1121 19:13:36.205334 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qpwq7\" (UniqueName: \"kubernetes.io/projected/4cd49166-63d9-411e-aef7-6e5f196d78f0-kube-api-access-qpwq7\") pod \"redhat-marketplace-zq7bf\" (UID: \"4cd49166-63d9-411e-aef7-6e5f196d78f0\") " pod="openshift-marketplace/redhat-marketplace-zq7bf" Nov 21 19:13:36 crc kubenswrapper[4701]: I1121 19:13:36.344031 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zq7bf" Nov 21 19:13:36 crc kubenswrapper[4701]: I1121 19:13:36.878115 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zq7bf"] Nov 21 19:13:36 crc kubenswrapper[4701]: W1121 19:13:36.885717 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4cd49166_63d9_411e_aef7_6e5f196d78f0.slice/crio-98a9ae01f861f4eafa0633f938ab039b053fb7b81c3d16e4a80da7cee015f893 WatchSource:0}: Error finding container 98a9ae01f861f4eafa0633f938ab039b053fb7b81c3d16e4a80da7cee015f893: Status 404 returned error can't find the container with id 98a9ae01f861f4eafa0633f938ab039b053fb7b81c3d16e4a80da7cee015f893 Nov 21 19:13:37 crc kubenswrapper[4701]: I1121 19:13:37.539134 4701 generic.go:334] "Generic (PLEG): container finished" podID="4cd49166-63d9-411e-aef7-6e5f196d78f0" containerID="e7b8d3ad773b7efd843aaf2d5c4ee8995ecaf89eeb638ead2fb912522285b118" exitCode=0 Nov 21 19:13:37 crc kubenswrapper[4701]: I1121 19:13:37.539259 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zq7bf" event={"ID":"4cd49166-63d9-411e-aef7-6e5f196d78f0","Type":"ContainerDied","Data":"e7b8d3ad773b7efd843aaf2d5c4ee8995ecaf89eeb638ead2fb912522285b118"} Nov 21 19:13:37 crc kubenswrapper[4701]: I1121 19:13:37.539297 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zq7bf" event={"ID":"4cd49166-63d9-411e-aef7-6e5f196d78f0","Type":"ContainerStarted","Data":"98a9ae01f861f4eafa0633f938ab039b053fb7b81c3d16e4a80da7cee015f893"} Nov 21 19:13:38 crc kubenswrapper[4701]: I1121 19:13:38.547824 4701 generic.go:334] "Generic (PLEG): container finished" podID="4cd49166-63d9-411e-aef7-6e5f196d78f0" containerID="ec9766fb9869354e00de6425e2581609803c17cae953e4d7d96ee92d219f65e6" exitCode=0 Nov 21 19:13:38 crc kubenswrapper[4701]: I1121 19:13:38.548056 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zq7bf" event={"ID":"4cd49166-63d9-411e-aef7-6e5f196d78f0","Type":"ContainerDied","Data":"ec9766fb9869354e00de6425e2581609803c17cae953e4d7d96ee92d219f65e6"} Nov 21 19:13:39 crc kubenswrapper[4701]: I1121 19:13:39.088125 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-pthql" Nov 21 19:13:39 crc kubenswrapper[4701]: I1121 19:13:39.560102 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zq7bf" event={"ID":"4cd49166-63d9-411e-aef7-6e5f196d78f0","Type":"ContainerStarted","Data":"3259be968f0ee14922da3007b4790b3ed4fba9f413d71bf43ff5c99dae676d79"} Nov 21 19:13:39 crc kubenswrapper[4701]: I1121 19:13:39.590177 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/redhat-marketplace-zq7bf" podStartSLOduration=2.131003358 podStartE2EDuration="3.590143844s" podCreationTimestamp="2025-11-21 19:13:36 +0000 UTC" firstStartedPulling="2025-11-21 19:13:37.541021771 +0000 UTC m=+708.326161808" lastFinishedPulling="2025-11-21 19:13:39.000162257 +0000 UTC m=+709.785302294" observedRunningTime="2025-11-21 19:13:39.585457558 +0000 UTC m=+710.370597595" watchObservedRunningTime="2025-11-21 19:13:39.590143844 +0000 UTC m=+710.375283911" Nov 21 19:13:46 crc kubenswrapper[4701]: I1121 19:13:46.344671 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-zq7bf" Nov 21 19:13:46 crc kubenswrapper[4701]: I1121 19:13:46.345689 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-zq7bf" Nov 21 19:13:46 crc kubenswrapper[4701]: I1121 19:13:46.410352 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-zq7bf" Nov 21 19:13:46 crc kubenswrapper[4701]: I1121 19:13:46.669899 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-zq7bf" Nov 21 19:13:46 crc kubenswrapper[4701]: I1121 19:13:46.717077 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-zq7bf"] Nov 21 19:13:48 crc kubenswrapper[4701]: I1121 19:13:48.613389 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 19:13:48 crc kubenswrapper[4701]: I1121 19:13:48.614295 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 19:13:48 crc kubenswrapper[4701]: I1121 19:13:48.640468 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-zq7bf" podUID="4cd49166-63d9-411e-aef7-6e5f196d78f0" containerName="registry-server" containerID="cri-o://3259be968f0ee14922da3007b4790b3ed4fba9f413d71bf43ff5c99dae676d79" gracePeriod=2 Nov 21 19:13:49 crc kubenswrapper[4701]: I1121 19:13:49.336951 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zq7bf" Nov 21 19:13:49 crc kubenswrapper[4701]: I1121 19:13:49.435078 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qpwq7\" (UniqueName: \"kubernetes.io/projected/4cd49166-63d9-411e-aef7-6e5f196d78f0-kube-api-access-qpwq7\") pod \"4cd49166-63d9-411e-aef7-6e5f196d78f0\" (UID: \"4cd49166-63d9-411e-aef7-6e5f196d78f0\") " Nov 21 19:13:49 crc kubenswrapper[4701]: I1121 19:13:49.435258 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4cd49166-63d9-411e-aef7-6e5f196d78f0-utilities\") pod \"4cd49166-63d9-411e-aef7-6e5f196d78f0\" (UID: \"4cd49166-63d9-411e-aef7-6e5f196d78f0\") " Nov 21 19:13:49 crc kubenswrapper[4701]: I1121 19:13:49.435337 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4cd49166-63d9-411e-aef7-6e5f196d78f0-catalog-content\") pod \"4cd49166-63d9-411e-aef7-6e5f196d78f0\" (UID: \"4cd49166-63d9-411e-aef7-6e5f196d78f0\") " Nov 21 19:13:49 crc kubenswrapper[4701]: I1121 19:13:49.437079 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4cd49166-63d9-411e-aef7-6e5f196d78f0-utilities" (OuterVolumeSpecName: "utilities") pod "4cd49166-63d9-411e-aef7-6e5f196d78f0" (UID: "4cd49166-63d9-411e-aef7-6e5f196d78f0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:13:49 crc kubenswrapper[4701]: I1121 19:13:49.447867 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4cd49166-63d9-411e-aef7-6e5f196d78f0-kube-api-access-qpwq7" (OuterVolumeSpecName: "kube-api-access-qpwq7") pod "4cd49166-63d9-411e-aef7-6e5f196d78f0" (UID: "4cd49166-63d9-411e-aef7-6e5f196d78f0"). InnerVolumeSpecName "kube-api-access-qpwq7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:13:49 crc kubenswrapper[4701]: I1121 19:13:49.470102 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4cd49166-63d9-411e-aef7-6e5f196d78f0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4cd49166-63d9-411e-aef7-6e5f196d78f0" (UID: "4cd49166-63d9-411e-aef7-6e5f196d78f0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:13:49 crc kubenswrapper[4701]: I1121 19:13:49.536992 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qpwq7\" (UniqueName: \"kubernetes.io/projected/4cd49166-63d9-411e-aef7-6e5f196d78f0-kube-api-access-qpwq7\") on node \"crc\" DevicePath \"\"" Nov 21 19:13:49 crc kubenswrapper[4701]: I1121 19:13:49.537513 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4cd49166-63d9-411e-aef7-6e5f196d78f0-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 19:13:49 crc kubenswrapper[4701]: I1121 19:13:49.537537 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4cd49166-63d9-411e-aef7-6e5f196d78f0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 19:13:49 crc kubenswrapper[4701]: I1121 19:13:49.654828 4701 generic.go:334] "Generic (PLEG): container finished" podID="4cd49166-63d9-411e-aef7-6e5f196d78f0" containerID="3259be968f0ee14922da3007b4790b3ed4fba9f413d71bf43ff5c99dae676d79" exitCode=0 Nov 21 19:13:49 crc kubenswrapper[4701]: I1121 19:13:49.654896 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zq7bf" event={"ID":"4cd49166-63d9-411e-aef7-6e5f196d78f0","Type":"ContainerDied","Data":"3259be968f0ee14922da3007b4790b3ed4fba9f413d71bf43ff5c99dae676d79"} Nov 21 19:13:49 crc kubenswrapper[4701]: I1121 19:13:49.654937 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zq7bf" event={"ID":"4cd49166-63d9-411e-aef7-6e5f196d78f0","Type":"ContainerDied","Data":"98a9ae01f861f4eafa0633f938ab039b053fb7b81c3d16e4a80da7cee015f893"} Nov 21 19:13:49 crc kubenswrapper[4701]: I1121 19:13:49.654968 4701 scope.go:117] "RemoveContainer" containerID="3259be968f0ee14922da3007b4790b3ed4fba9f413d71bf43ff5c99dae676d79" Nov 21 19:13:49 crc kubenswrapper[4701]: I1121 19:13:49.655143 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zq7bf" Nov 21 19:13:49 crc kubenswrapper[4701]: I1121 19:13:49.692749 4701 scope.go:117] "RemoveContainer" containerID="ec9766fb9869354e00de6425e2581609803c17cae953e4d7d96ee92d219f65e6" Nov 21 19:13:49 crc kubenswrapper[4701]: I1121 19:13:49.701409 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-zq7bf"] Nov 21 19:13:49 crc kubenswrapper[4701]: I1121 19:13:49.704361 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-zq7bf"] Nov 21 19:13:49 crc kubenswrapper[4701]: I1121 19:13:49.745761 4701 scope.go:117] "RemoveContainer" containerID="e7b8d3ad773b7efd843aaf2d5c4ee8995ecaf89eeb638ead2fb912522285b118" Nov 21 19:13:49 crc kubenswrapper[4701]: I1121 19:13:49.791876 4701 scope.go:117] "RemoveContainer" containerID="3259be968f0ee14922da3007b4790b3ed4fba9f413d71bf43ff5c99dae676d79" Nov 21 19:13:49 crc kubenswrapper[4701]: E1121 19:13:49.792919 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3259be968f0ee14922da3007b4790b3ed4fba9f413d71bf43ff5c99dae676d79\": container with ID starting with 3259be968f0ee14922da3007b4790b3ed4fba9f413d71bf43ff5c99dae676d79 not found: ID does not exist" containerID="3259be968f0ee14922da3007b4790b3ed4fba9f413d71bf43ff5c99dae676d79" Nov 21 19:13:49 crc kubenswrapper[4701]: I1121 19:13:49.792968 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3259be968f0ee14922da3007b4790b3ed4fba9f413d71bf43ff5c99dae676d79"} err="failed to get container status \"3259be968f0ee14922da3007b4790b3ed4fba9f413d71bf43ff5c99dae676d79\": rpc error: code = NotFound desc = could not find container \"3259be968f0ee14922da3007b4790b3ed4fba9f413d71bf43ff5c99dae676d79\": container with ID starting with 3259be968f0ee14922da3007b4790b3ed4fba9f413d71bf43ff5c99dae676d79 not found: ID does not exist" Nov 21 19:13:49 crc kubenswrapper[4701]: I1121 19:13:49.793004 4701 scope.go:117] "RemoveContainer" containerID="ec9766fb9869354e00de6425e2581609803c17cae953e4d7d96ee92d219f65e6" Nov 21 19:13:49 crc kubenswrapper[4701]: E1121 19:13:49.794339 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ec9766fb9869354e00de6425e2581609803c17cae953e4d7d96ee92d219f65e6\": container with ID starting with ec9766fb9869354e00de6425e2581609803c17cae953e4d7d96ee92d219f65e6 not found: ID does not exist" containerID="ec9766fb9869354e00de6425e2581609803c17cae953e4d7d96ee92d219f65e6" Nov 21 19:13:49 crc kubenswrapper[4701]: I1121 19:13:49.794430 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ec9766fb9869354e00de6425e2581609803c17cae953e4d7d96ee92d219f65e6"} err="failed to get container status \"ec9766fb9869354e00de6425e2581609803c17cae953e4d7d96ee92d219f65e6\": rpc error: code = NotFound desc = could not find container \"ec9766fb9869354e00de6425e2581609803c17cae953e4d7d96ee92d219f65e6\": container with ID starting with ec9766fb9869354e00de6425e2581609803c17cae953e4d7d96ee92d219f65e6 not found: ID does not exist" Nov 21 19:13:49 crc kubenswrapper[4701]: I1121 19:13:49.794486 4701 scope.go:117] "RemoveContainer" containerID="e7b8d3ad773b7efd843aaf2d5c4ee8995ecaf89eeb638ead2fb912522285b118" Nov 21 19:13:49 crc kubenswrapper[4701]: E1121 19:13:49.796463 4701 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"e7b8d3ad773b7efd843aaf2d5c4ee8995ecaf89eeb638ead2fb912522285b118\": container with ID starting with e7b8d3ad773b7efd843aaf2d5c4ee8995ecaf89eeb638ead2fb912522285b118 not found: ID does not exist" containerID="e7b8d3ad773b7efd843aaf2d5c4ee8995ecaf89eeb638ead2fb912522285b118" Nov 21 19:13:49 crc kubenswrapper[4701]: I1121 19:13:49.796510 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e7b8d3ad773b7efd843aaf2d5c4ee8995ecaf89eeb638ead2fb912522285b118"} err="failed to get container status \"e7b8d3ad773b7efd843aaf2d5c4ee8995ecaf89eeb638ead2fb912522285b118\": rpc error: code = NotFound desc = could not find container \"e7b8d3ad773b7efd843aaf2d5c4ee8995ecaf89eeb638ead2fb912522285b118\": container with ID starting with e7b8d3ad773b7efd843aaf2d5c4ee8995ecaf89eeb638ead2fb912522285b118 not found: ID does not exist" Nov 21 19:13:49 crc kubenswrapper[4701]: I1121 19:13:49.982448 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4cd49166-63d9-411e-aef7-6e5f196d78f0" path="/var/lib/kubelet/pods/4cd49166-63d9-411e-aef7-6e5f196d78f0/volumes" Nov 21 19:13:54 crc kubenswrapper[4701]: I1121 19:13:54.581917 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-cwddx" podUID="f7def574-9941-4933-83df-3f20df5797d4" containerName="console" containerID="cri-o://b19c5cafe29e1768e2d6df2eee994afe070120f77fc2b5571bfab081b8ba9d51" gracePeriod=15 Nov 21 19:13:55 crc kubenswrapper[4701]: I1121 19:13:55.143341 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-cwddx_f7def574-9941-4933-83df-3f20df5797d4/console/0.log" Nov 21 19:13:55 crc kubenswrapper[4701]: I1121 19:13:55.144382 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-cwddx" Nov 21 19:13:55 crc kubenswrapper[4701]: I1121 19:13:55.231987 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f7def574-9941-4933-83df-3f20df5797d4-service-ca\") pod \"f7def574-9941-4933-83df-3f20df5797d4\" (UID: \"f7def574-9941-4933-83df-3f20df5797d4\") " Nov 21 19:13:55 crc kubenswrapper[4701]: I1121 19:13:55.232084 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f7def574-9941-4933-83df-3f20df5797d4-console-oauth-config\") pod \"f7def574-9941-4933-83df-3f20df5797d4\" (UID: \"f7def574-9941-4933-83df-3f20df5797d4\") " Nov 21 19:13:55 crc kubenswrapper[4701]: I1121 19:13:55.232329 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f7def574-9941-4933-83df-3f20df5797d4-console-serving-cert\") pod \"f7def574-9941-4933-83df-3f20df5797d4\" (UID: \"f7def574-9941-4933-83df-3f20df5797d4\") " Nov 21 19:13:55 crc kubenswrapper[4701]: I1121 19:13:55.232364 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4sw8h\" (UniqueName: \"kubernetes.io/projected/f7def574-9941-4933-83df-3f20df5797d4-kube-api-access-4sw8h\") pod \"f7def574-9941-4933-83df-3f20df5797d4\" (UID: \"f7def574-9941-4933-83df-3f20df5797d4\") " Nov 21 19:13:55 crc kubenswrapper[4701]: I1121 19:13:55.232419 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f7def574-9941-4933-83df-3f20df5797d4-oauth-serving-cert\") pod \"f7def574-9941-4933-83df-3f20df5797d4\" (UID: \"f7def574-9941-4933-83df-3f20df5797d4\") " Nov 21 19:13:55 crc kubenswrapper[4701]: I1121 19:13:55.232518 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f7def574-9941-4933-83df-3f20df5797d4-trusted-ca-bundle\") pod \"f7def574-9941-4933-83df-3f20df5797d4\" (UID: \"f7def574-9941-4933-83df-3f20df5797d4\") " Nov 21 19:13:55 crc kubenswrapper[4701]: I1121 19:13:55.232543 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/f7def574-9941-4933-83df-3f20df5797d4-console-config\") pod \"f7def574-9941-4933-83df-3f20df5797d4\" (UID: \"f7def574-9941-4933-83df-3f20df5797d4\") " Nov 21 19:13:55 crc kubenswrapper[4701]: I1121 19:13:55.233919 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f7def574-9941-4933-83df-3f20df5797d4-console-config" (OuterVolumeSpecName: "console-config") pod "f7def574-9941-4933-83df-3f20df5797d4" (UID: "f7def574-9941-4933-83df-3f20df5797d4"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:13:55 crc kubenswrapper[4701]: I1121 19:13:55.233931 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f7def574-9941-4933-83df-3f20df5797d4-service-ca" (OuterVolumeSpecName: "service-ca") pod "f7def574-9941-4933-83df-3f20df5797d4" (UID: "f7def574-9941-4933-83df-3f20df5797d4"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:13:55 crc kubenswrapper[4701]: I1121 19:13:55.234047 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f7def574-9941-4933-83df-3f20df5797d4-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "f7def574-9941-4933-83df-3f20df5797d4" (UID: "f7def574-9941-4933-83df-3f20df5797d4"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:13:55 crc kubenswrapper[4701]: I1121 19:13:55.234058 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f7def574-9941-4933-83df-3f20df5797d4-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "f7def574-9941-4933-83df-3f20df5797d4" (UID: "f7def574-9941-4933-83df-3f20df5797d4"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:13:55 crc kubenswrapper[4701]: I1121 19:13:55.241553 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7def574-9941-4933-83df-3f20df5797d4-kube-api-access-4sw8h" (OuterVolumeSpecName: "kube-api-access-4sw8h") pod "f7def574-9941-4933-83df-3f20df5797d4" (UID: "f7def574-9941-4933-83df-3f20df5797d4"). InnerVolumeSpecName "kube-api-access-4sw8h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:13:55 crc kubenswrapper[4701]: I1121 19:13:55.242550 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f7def574-9941-4933-83df-3f20df5797d4-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "f7def574-9941-4933-83df-3f20df5797d4" (UID: "f7def574-9941-4933-83df-3f20df5797d4"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:13:55 crc kubenswrapper[4701]: I1121 19:13:55.245268 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f7def574-9941-4933-83df-3f20df5797d4-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "f7def574-9941-4933-83df-3f20df5797d4" (UID: "f7def574-9941-4933-83df-3f20df5797d4"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:13:55 crc kubenswrapper[4701]: I1121 19:13:55.334463 4701 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f7def574-9941-4933-83df-3f20df5797d4-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 19:13:55 crc kubenswrapper[4701]: I1121 19:13:55.334525 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4sw8h\" (UniqueName: \"kubernetes.io/projected/f7def574-9941-4933-83df-3f20df5797d4-kube-api-access-4sw8h\") on node \"crc\" DevicePath \"\"" Nov 21 19:13:55 crc kubenswrapper[4701]: I1121 19:13:55.334558 4701 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f7def574-9941-4933-83df-3f20df5797d4-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 19:13:55 crc kubenswrapper[4701]: I1121 19:13:55.334585 4701 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f7def574-9941-4933-83df-3f20df5797d4-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:13:55 crc kubenswrapper[4701]: I1121 19:13:55.334611 4701 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/f7def574-9941-4933-83df-3f20df5797d4-console-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:13:55 crc kubenswrapper[4701]: I1121 19:13:55.334638 4701 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f7def574-9941-4933-83df-3f20df5797d4-service-ca\") on node \"crc\" DevicePath \"\"" Nov 21 19:13:55 crc kubenswrapper[4701]: I1121 19:13:55.334657 4701 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f7def574-9941-4933-83df-3f20df5797d4-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:13:55 crc kubenswrapper[4701]: I1121 19:13:55.712596 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-cwddx_f7def574-9941-4933-83df-3f20df5797d4/console/0.log" Nov 21 19:13:55 crc kubenswrapper[4701]: I1121 19:13:55.713113 4701 generic.go:334] "Generic (PLEG): container finished" podID="f7def574-9941-4933-83df-3f20df5797d4" containerID="b19c5cafe29e1768e2d6df2eee994afe070120f77fc2b5571bfab081b8ba9d51" exitCode=2 Nov 21 19:13:55 crc kubenswrapper[4701]: I1121 19:13:55.713166 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-cwddx" event={"ID":"f7def574-9941-4933-83df-3f20df5797d4","Type":"ContainerDied","Data":"b19c5cafe29e1768e2d6df2eee994afe070120f77fc2b5571bfab081b8ba9d51"} Nov 21 19:13:55 crc kubenswrapper[4701]: I1121 19:13:55.713250 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-cwddx" event={"ID":"f7def574-9941-4933-83df-3f20df5797d4","Type":"ContainerDied","Data":"2153e73e7611e6c8cc808341bb0fa415e25bc859ed2941398dbf12779cac1cd3"} Nov 21 19:13:55 crc kubenswrapper[4701]: I1121 19:13:55.713301 4701 scope.go:117] "RemoveContainer" containerID="b19c5cafe29e1768e2d6df2eee994afe070120f77fc2b5571bfab081b8ba9d51" Nov 21 19:13:55 crc kubenswrapper[4701]: I1121 19:13:55.713373 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-cwddx" Nov 21 19:13:55 crc kubenswrapper[4701]: I1121 19:13:55.751600 4701 scope.go:117] "RemoveContainer" containerID="b19c5cafe29e1768e2d6df2eee994afe070120f77fc2b5571bfab081b8ba9d51" Nov 21 19:13:55 crc kubenswrapper[4701]: E1121 19:13:55.752453 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b19c5cafe29e1768e2d6df2eee994afe070120f77fc2b5571bfab081b8ba9d51\": container with ID starting with b19c5cafe29e1768e2d6df2eee994afe070120f77fc2b5571bfab081b8ba9d51 not found: ID does not exist" containerID="b19c5cafe29e1768e2d6df2eee994afe070120f77fc2b5571bfab081b8ba9d51" Nov 21 19:13:55 crc kubenswrapper[4701]: I1121 19:13:55.752507 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b19c5cafe29e1768e2d6df2eee994afe070120f77fc2b5571bfab081b8ba9d51"} err="failed to get container status \"b19c5cafe29e1768e2d6df2eee994afe070120f77fc2b5571bfab081b8ba9d51\": rpc error: code = NotFound desc = could not find container \"b19c5cafe29e1768e2d6df2eee994afe070120f77fc2b5571bfab081b8ba9d51\": container with ID starting with b19c5cafe29e1768e2d6df2eee994afe070120f77fc2b5571bfab081b8ba9d51 not found: ID does not exist" Nov 21 19:13:55 crc kubenswrapper[4701]: I1121 19:13:55.768875 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-cwddx"] Nov 21 19:13:55 crc kubenswrapper[4701]: I1121 19:13:55.775532 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-cwddx"] Nov 21 19:13:55 crc kubenswrapper[4701]: I1121 19:13:55.962438 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7def574-9941-4933-83df-3f20df5797d4" path="/var/lib/kubelet/pods/f7def574-9941-4933-83df-3f20df5797d4/volumes" Nov 21 19:13:56 crc kubenswrapper[4701]: I1121 19:13:56.646989 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x"] Nov 21 19:13:56 crc kubenswrapper[4701]: E1121 19:13:56.647270 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cd49166-63d9-411e-aef7-6e5f196d78f0" containerName="registry-server" Nov 21 19:13:56 crc kubenswrapper[4701]: I1121 19:13:56.647289 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cd49166-63d9-411e-aef7-6e5f196d78f0" containerName="registry-server" Nov 21 19:13:56 crc kubenswrapper[4701]: E1121 19:13:56.647308 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cd49166-63d9-411e-aef7-6e5f196d78f0" containerName="extract-content" Nov 21 19:13:56 crc kubenswrapper[4701]: I1121 19:13:56.647316 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cd49166-63d9-411e-aef7-6e5f196d78f0" containerName="extract-content" Nov 21 19:13:56 crc kubenswrapper[4701]: E1121 19:13:56.647332 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cd49166-63d9-411e-aef7-6e5f196d78f0" containerName="extract-utilities" Nov 21 19:13:56 crc kubenswrapper[4701]: I1121 19:13:56.647341 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cd49166-63d9-411e-aef7-6e5f196d78f0" containerName="extract-utilities" Nov 21 19:13:56 crc kubenswrapper[4701]: E1121 19:13:56.647351 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7def574-9941-4933-83df-3f20df5797d4" containerName="console" Nov 21 19:13:56 crc kubenswrapper[4701]: I1121 
19:13:56.647359 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7def574-9941-4933-83df-3f20df5797d4" containerName="console" Nov 21 19:13:56 crc kubenswrapper[4701]: I1121 19:13:56.647487 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7def574-9941-4933-83df-3f20df5797d4" containerName="console" Nov 21 19:13:56 crc kubenswrapper[4701]: I1121 19:13:56.647499 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="4cd49166-63d9-411e-aef7-6e5f196d78f0" containerName="registry-server" Nov 21 19:13:56 crc kubenswrapper[4701]: I1121 19:13:56.648470 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x" Nov 21 19:13:56 crc kubenswrapper[4701]: I1121 19:13:56.650909 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 21 19:13:56 crc kubenswrapper[4701]: I1121 19:13:56.660410 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x"] Nov 21 19:13:56 crc kubenswrapper[4701]: I1121 19:13:56.755819 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/504f2cb2-d553-4a6e-8a22-b3c111a55808-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x\" (UID: \"504f2cb2-d553-4a6e-8a22-b3c111a55808\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x" Nov 21 19:13:56 crc kubenswrapper[4701]: I1121 19:13:56.755889 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/504f2cb2-d553-4a6e-8a22-b3c111a55808-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x\" (UID: \"504f2cb2-d553-4a6e-8a22-b3c111a55808\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x" Nov 21 19:13:56 crc kubenswrapper[4701]: I1121 19:13:56.755957 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qksxr\" (UniqueName: \"kubernetes.io/projected/504f2cb2-d553-4a6e-8a22-b3c111a55808-kube-api-access-qksxr\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x\" (UID: \"504f2cb2-d553-4a6e-8a22-b3c111a55808\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x" Nov 21 19:13:56 crc kubenswrapper[4701]: I1121 19:13:56.857436 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/504f2cb2-d553-4a6e-8a22-b3c111a55808-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x\" (UID: \"504f2cb2-d553-4a6e-8a22-b3c111a55808\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x" Nov 21 19:13:56 crc kubenswrapper[4701]: I1121 19:13:56.857619 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qksxr\" (UniqueName: \"kubernetes.io/projected/504f2cb2-d553-4a6e-8a22-b3c111a55808-kube-api-access-qksxr\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x\" (UID: \"504f2cb2-d553-4a6e-8a22-b3c111a55808\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x" Nov 21 
19:13:56 crc kubenswrapper[4701]: I1121 19:13:56.857709 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/504f2cb2-d553-4a6e-8a22-b3c111a55808-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x\" (UID: \"504f2cb2-d553-4a6e-8a22-b3c111a55808\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x" Nov 21 19:13:56 crc kubenswrapper[4701]: I1121 19:13:56.858150 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/504f2cb2-d553-4a6e-8a22-b3c111a55808-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x\" (UID: \"504f2cb2-d553-4a6e-8a22-b3c111a55808\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x" Nov 21 19:13:56 crc kubenswrapper[4701]: I1121 19:13:56.858477 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/504f2cb2-d553-4a6e-8a22-b3c111a55808-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x\" (UID: \"504f2cb2-d553-4a6e-8a22-b3c111a55808\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x" Nov 21 19:13:56 crc kubenswrapper[4701]: I1121 19:13:56.895772 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qksxr\" (UniqueName: \"kubernetes.io/projected/504f2cb2-d553-4a6e-8a22-b3c111a55808-kube-api-access-qksxr\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x\" (UID: \"504f2cb2-d553-4a6e-8a22-b3c111a55808\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x" Nov 21 19:13:56 crc kubenswrapper[4701]: I1121 19:13:56.970852 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x" Nov 21 19:13:57 crc kubenswrapper[4701]: I1121 19:13:57.496325 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x"] Nov 21 19:13:57 crc kubenswrapper[4701]: I1121 19:13:57.730349 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x" event={"ID":"504f2cb2-d553-4a6e-8a22-b3c111a55808","Type":"ContainerStarted","Data":"aaa5d8db4b04b7748657becdddabedf0e6f103fb18222e77a2fd0ba38a291f10"} Nov 21 19:13:58 crc kubenswrapper[4701]: I1121 19:13:58.742511 4701 generic.go:334] "Generic (PLEG): container finished" podID="504f2cb2-d553-4a6e-8a22-b3c111a55808" containerID="c1452f0e5f297331cff5e0aede4b45819b31e7f9e043dec5ffd919beeb29269d" exitCode=0 Nov 21 19:13:58 crc kubenswrapper[4701]: I1121 19:13:58.744623 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x" event={"ID":"504f2cb2-d553-4a6e-8a22-b3c111a55808","Type":"ContainerDied","Data":"c1452f0e5f297331cff5e0aede4b45819b31e7f9e043dec5ffd919beeb29269d"} Nov 21 19:13:58 crc kubenswrapper[4701]: I1121 19:13:58.773750 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-778h8"] Nov 21 19:13:58 crc kubenswrapper[4701]: I1121 19:13:58.781854 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-778h8" Nov 21 19:13:58 crc kubenswrapper[4701]: I1121 19:13:58.798545 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-778h8"] Nov 21 19:13:58 crc kubenswrapper[4701]: I1121 19:13:58.886482 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7bjpk\" (UniqueName: \"kubernetes.io/projected/3b4c846c-d2bd-4fea-887f-644612eed4ce-kube-api-access-7bjpk\") pod \"redhat-operators-778h8\" (UID: \"3b4c846c-d2bd-4fea-887f-644612eed4ce\") " pod="openshift-marketplace/redhat-operators-778h8" Nov 21 19:13:58 crc kubenswrapper[4701]: I1121 19:13:58.886556 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b4c846c-d2bd-4fea-887f-644612eed4ce-utilities\") pod \"redhat-operators-778h8\" (UID: \"3b4c846c-d2bd-4fea-887f-644612eed4ce\") " pod="openshift-marketplace/redhat-operators-778h8" Nov 21 19:13:58 crc kubenswrapper[4701]: I1121 19:13:58.886614 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b4c846c-d2bd-4fea-887f-644612eed4ce-catalog-content\") pod \"redhat-operators-778h8\" (UID: \"3b4c846c-d2bd-4fea-887f-644612eed4ce\") " pod="openshift-marketplace/redhat-operators-778h8" Nov 21 19:13:58 crc kubenswrapper[4701]: I1121 19:13:58.988666 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b4c846c-d2bd-4fea-887f-644612eed4ce-utilities\") pod \"redhat-operators-778h8\" (UID: \"3b4c846c-d2bd-4fea-887f-644612eed4ce\") " pod="openshift-marketplace/redhat-operators-778h8" Nov 21 19:13:58 crc kubenswrapper[4701]: I1121 19:13:58.988724 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b4c846c-d2bd-4fea-887f-644612eed4ce-catalog-content\") pod \"redhat-operators-778h8\" (UID: \"3b4c846c-d2bd-4fea-887f-644612eed4ce\") " pod="openshift-marketplace/redhat-operators-778h8" Nov 21 19:13:58 crc kubenswrapper[4701]: I1121 19:13:58.988942 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7bjpk\" (UniqueName: \"kubernetes.io/projected/3b4c846c-d2bd-4fea-887f-644612eed4ce-kube-api-access-7bjpk\") pod \"redhat-operators-778h8\" (UID: \"3b4c846c-d2bd-4fea-887f-644612eed4ce\") " pod="openshift-marketplace/redhat-operators-778h8" Nov 21 19:13:58 crc kubenswrapper[4701]: I1121 19:13:58.989650 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b4c846c-d2bd-4fea-887f-644612eed4ce-utilities\") pod \"redhat-operators-778h8\" (UID: \"3b4c846c-d2bd-4fea-887f-644612eed4ce\") " pod="openshift-marketplace/redhat-operators-778h8" Nov 21 19:13:58 crc kubenswrapper[4701]: I1121 19:13:58.989951 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b4c846c-d2bd-4fea-887f-644612eed4ce-catalog-content\") pod \"redhat-operators-778h8\" (UID: \"3b4c846c-d2bd-4fea-887f-644612eed4ce\") " pod="openshift-marketplace/redhat-operators-778h8" Nov 21 19:13:59 crc kubenswrapper[4701]: I1121 19:13:59.023388 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-7bjpk\" (UniqueName: \"kubernetes.io/projected/3b4c846c-d2bd-4fea-887f-644612eed4ce-kube-api-access-7bjpk\") pod \"redhat-operators-778h8\" (UID: \"3b4c846c-d2bd-4fea-887f-644612eed4ce\") " pod="openshift-marketplace/redhat-operators-778h8" Nov 21 19:13:59 crc kubenswrapper[4701]: I1121 19:13:59.155975 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-778h8" Nov 21 19:13:59 crc kubenswrapper[4701]: I1121 19:13:59.643337 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-778h8"] Nov 21 19:13:59 crc kubenswrapper[4701]: I1121 19:13:59.752842 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-778h8" event={"ID":"3b4c846c-d2bd-4fea-887f-644612eed4ce","Type":"ContainerStarted","Data":"72a98d7051adca05191444199b36ab7bc752f7e227e3782314f0a7faecad0004"} Nov 21 19:14:00 crc kubenswrapper[4701]: I1121 19:14:00.762330 4701 generic.go:334] "Generic (PLEG): container finished" podID="504f2cb2-d553-4a6e-8a22-b3c111a55808" containerID="99ae87c3e7e35ac1c9fbcf1314c0ad2970acef9ae46e2397033c1718d3a32d27" exitCode=0 Nov 21 19:14:00 crc kubenswrapper[4701]: I1121 19:14:00.762437 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x" event={"ID":"504f2cb2-d553-4a6e-8a22-b3c111a55808","Type":"ContainerDied","Data":"99ae87c3e7e35ac1c9fbcf1314c0ad2970acef9ae46e2397033c1718d3a32d27"} Nov 21 19:14:00 crc kubenswrapper[4701]: I1121 19:14:00.765954 4701 generic.go:334] "Generic (PLEG): container finished" podID="3b4c846c-d2bd-4fea-887f-644612eed4ce" containerID="2b5e3458501d8afb57998755c55dbe2635e8ad371cc50a923c785533576359f6" exitCode=0 Nov 21 19:14:00 crc kubenswrapper[4701]: I1121 19:14:00.766009 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-778h8" event={"ID":"3b4c846c-d2bd-4fea-887f-644612eed4ce","Type":"ContainerDied","Data":"2b5e3458501d8afb57998755c55dbe2635e8ad371cc50a923c785533576359f6"} Nov 21 19:14:01 crc kubenswrapper[4701]: I1121 19:14:01.777842 4701 generic.go:334] "Generic (PLEG): container finished" podID="504f2cb2-d553-4a6e-8a22-b3c111a55808" containerID="59224e81451b530964159632132fa5cb2f259ba245fbd428265b2e7aee57cda2" exitCode=0 Nov 21 19:14:01 crc kubenswrapper[4701]: I1121 19:14:01.778007 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x" event={"ID":"504f2cb2-d553-4a6e-8a22-b3c111a55808","Type":"ContainerDied","Data":"59224e81451b530964159632132fa5cb2f259ba245fbd428265b2e7aee57cda2"} Nov 21 19:14:01 crc kubenswrapper[4701]: I1121 19:14:01.780591 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-778h8" event={"ID":"3b4c846c-d2bd-4fea-887f-644612eed4ce","Type":"ContainerStarted","Data":"2578fc4a4d1aa087bd700b67ca31d2bc7d72d78bc903cfc7053f99551d706710"} Nov 21 19:14:02 crc kubenswrapper[4701]: I1121 19:14:02.787730 4701 generic.go:334] "Generic (PLEG): container finished" podID="3b4c846c-d2bd-4fea-887f-644612eed4ce" containerID="2578fc4a4d1aa087bd700b67ca31d2bc7d72d78bc903cfc7053f99551d706710" exitCode=0 Nov 21 19:14:02 crc kubenswrapper[4701]: I1121 19:14:02.787940 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-778h8" 
event={"ID":"3b4c846c-d2bd-4fea-887f-644612eed4ce","Type":"ContainerDied","Data":"2578fc4a4d1aa087bd700b67ca31d2bc7d72d78bc903cfc7053f99551d706710"} Nov 21 19:14:03 crc kubenswrapper[4701]: I1121 19:14:03.139428 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x" Nov 21 19:14:03 crc kubenswrapper[4701]: I1121 19:14:03.259971 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/504f2cb2-d553-4a6e-8a22-b3c111a55808-bundle\") pod \"504f2cb2-d553-4a6e-8a22-b3c111a55808\" (UID: \"504f2cb2-d553-4a6e-8a22-b3c111a55808\") " Nov 21 19:14:03 crc kubenswrapper[4701]: I1121 19:14:03.260093 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qksxr\" (UniqueName: \"kubernetes.io/projected/504f2cb2-d553-4a6e-8a22-b3c111a55808-kube-api-access-qksxr\") pod \"504f2cb2-d553-4a6e-8a22-b3c111a55808\" (UID: \"504f2cb2-d553-4a6e-8a22-b3c111a55808\") " Nov 21 19:14:03 crc kubenswrapper[4701]: I1121 19:14:03.260173 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/504f2cb2-d553-4a6e-8a22-b3c111a55808-util\") pod \"504f2cb2-d553-4a6e-8a22-b3c111a55808\" (UID: \"504f2cb2-d553-4a6e-8a22-b3c111a55808\") " Nov 21 19:14:03 crc kubenswrapper[4701]: I1121 19:14:03.261129 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/504f2cb2-d553-4a6e-8a22-b3c111a55808-bundle" (OuterVolumeSpecName: "bundle") pod "504f2cb2-d553-4a6e-8a22-b3c111a55808" (UID: "504f2cb2-d553-4a6e-8a22-b3c111a55808"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:14:03 crc kubenswrapper[4701]: I1121 19:14:03.272822 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/504f2cb2-d553-4a6e-8a22-b3c111a55808-kube-api-access-qksxr" (OuterVolumeSpecName: "kube-api-access-qksxr") pod "504f2cb2-d553-4a6e-8a22-b3c111a55808" (UID: "504f2cb2-d553-4a6e-8a22-b3c111a55808"). InnerVolumeSpecName "kube-api-access-qksxr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:14:03 crc kubenswrapper[4701]: I1121 19:14:03.363254 4701 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/504f2cb2-d553-4a6e-8a22-b3c111a55808-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:14:03 crc kubenswrapper[4701]: I1121 19:14:03.363869 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qksxr\" (UniqueName: \"kubernetes.io/projected/504f2cb2-d553-4a6e-8a22-b3c111a55808-kube-api-access-qksxr\") on node \"crc\" DevicePath \"\"" Nov 21 19:14:03 crc kubenswrapper[4701]: I1121 19:14:03.480653 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/504f2cb2-d553-4a6e-8a22-b3c111a55808-util" (OuterVolumeSpecName: "util") pod "504f2cb2-d553-4a6e-8a22-b3c111a55808" (UID: "504f2cb2-d553-4a6e-8a22-b3c111a55808"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:14:03 crc kubenswrapper[4701]: I1121 19:14:03.565890 4701 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/504f2cb2-d553-4a6e-8a22-b3c111a55808-util\") on node \"crc\" DevicePath \"\"" Nov 21 19:14:03 crc kubenswrapper[4701]: I1121 19:14:03.799094 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-778h8" event={"ID":"3b4c846c-d2bd-4fea-887f-644612eed4ce","Type":"ContainerStarted","Data":"9749c4f9cb6d9e57827f641557916771f997378e9ea9186c8bf302abdf4667e4"} Nov 21 19:14:03 crc kubenswrapper[4701]: I1121 19:14:03.802816 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x" event={"ID":"504f2cb2-d553-4a6e-8a22-b3c111a55808","Type":"ContainerDied","Data":"aaa5d8db4b04b7748657becdddabedf0e6f103fb18222e77a2fd0ba38a291f10"} Nov 21 19:14:03 crc kubenswrapper[4701]: I1121 19:14:03.802881 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aaa5d8db4b04b7748657becdddabedf0e6f103fb18222e77a2fd0ba38a291f10" Nov 21 19:14:03 crc kubenswrapper[4701]: I1121 19:14:03.803191 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x" Nov 21 19:14:03 crc kubenswrapper[4701]: I1121 19:14:03.830585 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-778h8" podStartSLOduration=3.351000758 podStartE2EDuration="5.830557565s" podCreationTimestamp="2025-11-21 19:13:58 +0000 UTC" firstStartedPulling="2025-11-21 19:14:00.767091196 +0000 UTC m=+731.552231223" lastFinishedPulling="2025-11-21 19:14:03.246648003 +0000 UTC m=+734.031788030" observedRunningTime="2025-11-21 19:14:03.823859843 +0000 UTC m=+734.608999900" watchObservedRunningTime="2025-11-21 19:14:03.830557565 +0000 UTC m=+734.615697632" Nov 21 19:14:09 crc kubenswrapper[4701]: I1121 19:14:09.156375 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-778h8" Nov 21 19:14:09 crc kubenswrapper[4701]: I1121 19:14:09.156952 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-778h8" Nov 21 19:14:10 crc kubenswrapper[4701]: I1121 19:14:10.214761 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-778h8" podUID="3b4c846c-d2bd-4fea-887f-644612eed4ce" containerName="registry-server" probeResult="failure" output=< Nov 21 19:14:10 crc kubenswrapper[4701]: timeout: failed to connect service ":50051" within 1s Nov 21 19:14:10 crc kubenswrapper[4701]: > Nov 21 19:14:13 crc kubenswrapper[4701]: I1121 19:14:13.866191 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-5b89f66749-2c7l9"] Nov 21 19:14:13 crc kubenswrapper[4701]: E1121 19:14:13.867551 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="504f2cb2-d553-4a6e-8a22-b3c111a55808" containerName="pull" Nov 21 19:14:13 crc kubenswrapper[4701]: I1121 19:14:13.867575 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="504f2cb2-d553-4a6e-8a22-b3c111a55808" containerName="pull" Nov 21 19:14:13 crc kubenswrapper[4701]: E1121 19:14:13.867600 4701 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="504f2cb2-d553-4a6e-8a22-b3c111a55808" containerName="util" Nov 21 19:14:13 crc kubenswrapper[4701]: I1121 19:14:13.867609 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="504f2cb2-d553-4a6e-8a22-b3c111a55808" containerName="util" Nov 21 19:14:13 crc kubenswrapper[4701]: E1121 19:14:13.867621 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="504f2cb2-d553-4a6e-8a22-b3c111a55808" containerName="extract" Nov 21 19:14:13 crc kubenswrapper[4701]: I1121 19:14:13.867632 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="504f2cb2-d553-4a6e-8a22-b3c111a55808" containerName="extract" Nov 21 19:14:13 crc kubenswrapper[4701]: I1121 19:14:13.867798 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="504f2cb2-d553-4a6e-8a22-b3c111a55808" containerName="extract" Nov 21 19:14:13 crc kubenswrapper[4701]: I1121 19:14:13.868430 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-5b89f66749-2c7l9" Nov 21 19:14:13 crc kubenswrapper[4701]: I1121 19:14:13.871703 4701 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 21 19:14:13 crc kubenswrapper[4701]: I1121 19:14:13.875285 4701 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Nov 21 19:14:13 crc kubenswrapper[4701]: I1121 19:14:13.875642 4701 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-4fzh6" Nov 21 19:14:13 crc kubenswrapper[4701]: I1121 19:14:13.875839 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 21 19:14:13 crc kubenswrapper[4701]: I1121 19:14:13.876485 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 21 19:14:13 crc kubenswrapper[4701]: I1121 19:14:13.887147 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-5b89f66749-2c7l9"] Nov 21 19:14:13 crc kubenswrapper[4701]: I1121 19:14:13.922313 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5glv2\" (UniqueName: \"kubernetes.io/projected/1e13b4b2-e1e0-4f12-b8a4-e364e57407b1-kube-api-access-5glv2\") pod \"metallb-operator-controller-manager-5b89f66749-2c7l9\" (UID: \"1e13b4b2-e1e0-4f12-b8a4-e364e57407b1\") " pod="metallb-system/metallb-operator-controller-manager-5b89f66749-2c7l9" Nov 21 19:14:13 crc kubenswrapper[4701]: I1121 19:14:13.922368 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1e13b4b2-e1e0-4f12-b8a4-e364e57407b1-webhook-cert\") pod \"metallb-operator-controller-manager-5b89f66749-2c7l9\" (UID: \"1e13b4b2-e1e0-4f12-b8a4-e364e57407b1\") " pod="metallb-system/metallb-operator-controller-manager-5b89f66749-2c7l9" Nov 21 19:14:13 crc kubenswrapper[4701]: I1121 19:14:13.922404 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1e13b4b2-e1e0-4f12-b8a4-e364e57407b1-apiservice-cert\") pod \"metallb-operator-controller-manager-5b89f66749-2c7l9\" (UID: \"1e13b4b2-e1e0-4f12-b8a4-e364e57407b1\") " pod="metallb-system/metallb-operator-controller-manager-5b89f66749-2c7l9" Nov 21 19:14:14 crc 
kubenswrapper[4701]: I1121 19:14:14.024008 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5glv2\" (UniqueName: \"kubernetes.io/projected/1e13b4b2-e1e0-4f12-b8a4-e364e57407b1-kube-api-access-5glv2\") pod \"metallb-operator-controller-manager-5b89f66749-2c7l9\" (UID: \"1e13b4b2-e1e0-4f12-b8a4-e364e57407b1\") " pod="metallb-system/metallb-operator-controller-manager-5b89f66749-2c7l9" Nov 21 19:14:14 crc kubenswrapper[4701]: I1121 19:14:14.024068 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1e13b4b2-e1e0-4f12-b8a4-e364e57407b1-webhook-cert\") pod \"metallb-operator-controller-manager-5b89f66749-2c7l9\" (UID: \"1e13b4b2-e1e0-4f12-b8a4-e364e57407b1\") " pod="metallb-system/metallb-operator-controller-manager-5b89f66749-2c7l9" Nov 21 19:14:14 crc kubenswrapper[4701]: I1121 19:14:14.024100 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1e13b4b2-e1e0-4f12-b8a4-e364e57407b1-apiservice-cert\") pod \"metallb-operator-controller-manager-5b89f66749-2c7l9\" (UID: \"1e13b4b2-e1e0-4f12-b8a4-e364e57407b1\") " pod="metallb-system/metallb-operator-controller-manager-5b89f66749-2c7l9" Nov 21 19:14:14 crc kubenswrapper[4701]: I1121 19:14:14.032893 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1e13b4b2-e1e0-4f12-b8a4-e364e57407b1-apiservice-cert\") pod \"metallb-operator-controller-manager-5b89f66749-2c7l9\" (UID: \"1e13b4b2-e1e0-4f12-b8a4-e364e57407b1\") " pod="metallb-system/metallb-operator-controller-manager-5b89f66749-2c7l9" Nov 21 19:14:14 crc kubenswrapper[4701]: I1121 19:14:14.032893 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1e13b4b2-e1e0-4f12-b8a4-e364e57407b1-webhook-cert\") pod \"metallb-operator-controller-manager-5b89f66749-2c7l9\" (UID: \"1e13b4b2-e1e0-4f12-b8a4-e364e57407b1\") " pod="metallb-system/metallb-operator-controller-manager-5b89f66749-2c7l9" Nov 21 19:14:14 crc kubenswrapper[4701]: I1121 19:14:14.041248 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5glv2\" (UniqueName: \"kubernetes.io/projected/1e13b4b2-e1e0-4f12-b8a4-e364e57407b1-kube-api-access-5glv2\") pod \"metallb-operator-controller-manager-5b89f66749-2c7l9\" (UID: \"1e13b4b2-e1e0-4f12-b8a4-e364e57407b1\") " pod="metallb-system/metallb-operator-controller-manager-5b89f66749-2c7l9" Nov 21 19:14:14 crc kubenswrapper[4701]: I1121 19:14:14.141978 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-7fffd4c557-7pqsr"] Nov 21 19:14:14 crc kubenswrapper[4701]: I1121 19:14:14.142714 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-7fffd4c557-7pqsr" Nov 21 19:14:14 crc kubenswrapper[4701]: I1121 19:14:14.146651 4701 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 21 19:14:14 crc kubenswrapper[4701]: I1121 19:14:14.146710 4701 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 21 19:14:14 crc kubenswrapper[4701]: I1121 19:14:14.147386 4701 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-mnlbs" Nov 21 19:14:14 crc kubenswrapper[4701]: I1121 19:14:14.165009 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-7fffd4c557-7pqsr"] Nov 21 19:14:14 crc kubenswrapper[4701]: I1121 19:14:14.186878 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-5b89f66749-2c7l9" Nov 21 19:14:14 crc kubenswrapper[4701]: I1121 19:14:14.227083 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/394bdfbe-ca77-47ca-837c-68023e532b01-apiservice-cert\") pod \"metallb-operator-webhook-server-7fffd4c557-7pqsr\" (UID: \"394bdfbe-ca77-47ca-837c-68023e532b01\") " pod="metallb-system/metallb-operator-webhook-server-7fffd4c557-7pqsr" Nov 21 19:14:14 crc kubenswrapper[4701]: I1121 19:14:14.227887 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jcdzc\" (UniqueName: \"kubernetes.io/projected/394bdfbe-ca77-47ca-837c-68023e532b01-kube-api-access-jcdzc\") pod \"metallb-operator-webhook-server-7fffd4c557-7pqsr\" (UID: \"394bdfbe-ca77-47ca-837c-68023e532b01\") " pod="metallb-system/metallb-operator-webhook-server-7fffd4c557-7pqsr" Nov 21 19:14:14 crc kubenswrapper[4701]: I1121 19:14:14.228028 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/394bdfbe-ca77-47ca-837c-68023e532b01-webhook-cert\") pod \"metallb-operator-webhook-server-7fffd4c557-7pqsr\" (UID: \"394bdfbe-ca77-47ca-837c-68023e532b01\") " pod="metallb-system/metallb-operator-webhook-server-7fffd4c557-7pqsr" Nov 21 19:14:14 crc kubenswrapper[4701]: I1121 19:14:14.330333 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/394bdfbe-ca77-47ca-837c-68023e532b01-apiservice-cert\") pod \"metallb-operator-webhook-server-7fffd4c557-7pqsr\" (UID: \"394bdfbe-ca77-47ca-837c-68023e532b01\") " pod="metallb-system/metallb-operator-webhook-server-7fffd4c557-7pqsr" Nov 21 19:14:14 crc kubenswrapper[4701]: I1121 19:14:14.330409 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jcdzc\" (UniqueName: \"kubernetes.io/projected/394bdfbe-ca77-47ca-837c-68023e532b01-kube-api-access-jcdzc\") pod \"metallb-operator-webhook-server-7fffd4c557-7pqsr\" (UID: \"394bdfbe-ca77-47ca-837c-68023e532b01\") " pod="metallb-system/metallb-operator-webhook-server-7fffd4c557-7pqsr" Nov 21 19:14:14 crc kubenswrapper[4701]: I1121 19:14:14.330458 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/394bdfbe-ca77-47ca-837c-68023e532b01-webhook-cert\") pod 
\"metallb-operator-webhook-server-7fffd4c557-7pqsr\" (UID: \"394bdfbe-ca77-47ca-837c-68023e532b01\") " pod="metallb-system/metallb-operator-webhook-server-7fffd4c557-7pqsr" Nov 21 19:14:14 crc kubenswrapper[4701]: I1121 19:14:14.334863 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/394bdfbe-ca77-47ca-837c-68023e532b01-webhook-cert\") pod \"metallb-operator-webhook-server-7fffd4c557-7pqsr\" (UID: \"394bdfbe-ca77-47ca-837c-68023e532b01\") " pod="metallb-system/metallb-operator-webhook-server-7fffd4c557-7pqsr" Nov 21 19:14:14 crc kubenswrapper[4701]: I1121 19:14:14.356550 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/394bdfbe-ca77-47ca-837c-68023e532b01-apiservice-cert\") pod \"metallb-operator-webhook-server-7fffd4c557-7pqsr\" (UID: \"394bdfbe-ca77-47ca-837c-68023e532b01\") " pod="metallb-system/metallb-operator-webhook-server-7fffd4c557-7pqsr" Nov 21 19:14:14 crc kubenswrapper[4701]: I1121 19:14:14.367172 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jcdzc\" (UniqueName: \"kubernetes.io/projected/394bdfbe-ca77-47ca-837c-68023e532b01-kube-api-access-jcdzc\") pod \"metallb-operator-webhook-server-7fffd4c557-7pqsr\" (UID: \"394bdfbe-ca77-47ca-837c-68023e532b01\") " pod="metallb-system/metallb-operator-webhook-server-7fffd4c557-7pqsr" Nov 21 19:14:14 crc kubenswrapper[4701]: I1121 19:14:14.462098 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-7fffd4c557-7pqsr" Nov 21 19:14:14 crc kubenswrapper[4701]: I1121 19:14:14.875883 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-5b89f66749-2c7l9"] Nov 21 19:14:14 crc kubenswrapper[4701]: I1121 19:14:14.977484 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-7fffd4c557-7pqsr"] Nov 21 19:14:14 crc kubenswrapper[4701]: W1121 19:14:14.992002 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod394bdfbe_ca77_47ca_837c_68023e532b01.slice/crio-f62fb410811e8b76eaa311a257c65b223a2d5909d3ec45f7fe1738505b4136ea WatchSource:0}: Error finding container f62fb410811e8b76eaa311a257c65b223a2d5909d3ec45f7fe1738505b4136ea: Status 404 returned error can't find the container with id f62fb410811e8b76eaa311a257c65b223a2d5909d3ec45f7fe1738505b4136ea Nov 21 19:14:15 crc kubenswrapper[4701]: I1121 19:14:15.892737 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-7fffd4c557-7pqsr" event={"ID":"394bdfbe-ca77-47ca-837c-68023e532b01","Type":"ContainerStarted","Data":"f62fb410811e8b76eaa311a257c65b223a2d5909d3ec45f7fe1738505b4136ea"} Nov 21 19:14:15 crc kubenswrapper[4701]: I1121 19:14:15.894404 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-5b89f66749-2c7l9" event={"ID":"1e13b4b2-e1e0-4f12-b8a4-e364e57407b1","Type":"ContainerStarted","Data":"866ae22ef89fb0eb8313de1cfd46eaf62e588f3a7fb66211622b3d79a2662230"} Nov 21 19:14:18 crc kubenswrapper[4701]: I1121 19:14:18.368753 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-77x66"] Nov 21 19:14:18 crc kubenswrapper[4701]: I1121 19:14:18.370163 4701 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openshift-marketplace/certified-operators-77x66" Nov 21 19:14:18 crc kubenswrapper[4701]: I1121 19:14:18.382228 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-77x66"] Nov 21 19:14:18 crc kubenswrapper[4701]: I1121 19:14:18.406562 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb21951f-0e5e-4984-8094-8539f637ad9b-catalog-content\") pod \"certified-operators-77x66\" (UID: \"cb21951f-0e5e-4984-8094-8539f637ad9b\") " pod="openshift-marketplace/certified-operators-77x66" Nov 21 19:14:18 crc kubenswrapper[4701]: I1121 19:14:18.406600 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb21951f-0e5e-4984-8094-8539f637ad9b-utilities\") pod \"certified-operators-77x66\" (UID: \"cb21951f-0e5e-4984-8094-8539f637ad9b\") " pod="openshift-marketplace/certified-operators-77x66" Nov 21 19:14:18 crc kubenswrapper[4701]: I1121 19:14:18.406624 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ctxb7\" (UniqueName: \"kubernetes.io/projected/cb21951f-0e5e-4984-8094-8539f637ad9b-kube-api-access-ctxb7\") pod \"certified-operators-77x66\" (UID: \"cb21951f-0e5e-4984-8094-8539f637ad9b\") " pod="openshift-marketplace/certified-operators-77x66" Nov 21 19:14:18 crc kubenswrapper[4701]: I1121 19:14:18.511266 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb21951f-0e5e-4984-8094-8539f637ad9b-catalog-content\") pod \"certified-operators-77x66\" (UID: \"cb21951f-0e5e-4984-8094-8539f637ad9b\") " pod="openshift-marketplace/certified-operators-77x66" Nov 21 19:14:18 crc kubenswrapper[4701]: I1121 19:14:18.511306 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb21951f-0e5e-4984-8094-8539f637ad9b-utilities\") pod \"certified-operators-77x66\" (UID: \"cb21951f-0e5e-4984-8094-8539f637ad9b\") " pod="openshift-marketplace/certified-operators-77x66" Nov 21 19:14:18 crc kubenswrapper[4701]: I1121 19:14:18.511327 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ctxb7\" (UniqueName: \"kubernetes.io/projected/cb21951f-0e5e-4984-8094-8539f637ad9b-kube-api-access-ctxb7\") pod \"certified-operators-77x66\" (UID: \"cb21951f-0e5e-4984-8094-8539f637ad9b\") " pod="openshift-marketplace/certified-operators-77x66" Nov 21 19:14:18 crc kubenswrapper[4701]: I1121 19:14:18.511904 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb21951f-0e5e-4984-8094-8539f637ad9b-utilities\") pod \"certified-operators-77x66\" (UID: \"cb21951f-0e5e-4984-8094-8539f637ad9b\") " pod="openshift-marketplace/certified-operators-77x66" Nov 21 19:14:18 crc kubenswrapper[4701]: I1121 19:14:18.512176 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb21951f-0e5e-4984-8094-8539f637ad9b-catalog-content\") pod \"certified-operators-77x66\" (UID: \"cb21951f-0e5e-4984-8094-8539f637ad9b\") " pod="openshift-marketplace/certified-operators-77x66" Nov 21 19:14:18 crc kubenswrapper[4701]: I1121 19:14:18.535062 4701 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ctxb7\" (UniqueName: \"kubernetes.io/projected/cb21951f-0e5e-4984-8094-8539f637ad9b-kube-api-access-ctxb7\") pod \"certified-operators-77x66\" (UID: \"cb21951f-0e5e-4984-8094-8539f637ad9b\") " pod="openshift-marketplace/certified-operators-77x66" Nov 21 19:14:18 crc kubenswrapper[4701]: I1121 19:14:18.614634 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 19:14:18 crc kubenswrapper[4701]: I1121 19:14:18.615081 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 19:14:18 crc kubenswrapper[4701]: I1121 19:14:18.615129 4701 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" Nov 21 19:14:18 crc kubenswrapper[4701]: I1121 19:14:18.615714 4701 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c8758f6e0ff69b0e680f67ce66823ba447806821fa55aca9dc22f0075d6645fd"} pod="openshift-machine-config-operator/machine-config-daemon-tbszf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 19:14:18 crc kubenswrapper[4701]: I1121 19:14:18.615774 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" containerID="cri-o://c8758f6e0ff69b0e680f67ce66823ba447806821fa55aca9dc22f0075d6645fd" gracePeriod=600 Nov 21 19:14:18 crc kubenswrapper[4701]: I1121 19:14:18.730152 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-77x66" Nov 21 19:14:18 crc kubenswrapper[4701]: I1121 19:14:18.925291 4701 generic.go:334] "Generic (PLEG): container finished" podID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerID="c8758f6e0ff69b0e680f67ce66823ba447806821fa55aca9dc22f0075d6645fd" exitCode=0 Nov 21 19:14:18 crc kubenswrapper[4701]: I1121 19:14:18.925316 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerDied","Data":"c8758f6e0ff69b0e680f67ce66823ba447806821fa55aca9dc22f0075d6645fd"} Nov 21 19:14:18 crc kubenswrapper[4701]: I1121 19:14:18.925431 4701 scope.go:117] "RemoveContainer" containerID="af4d914cc7c263c798f4370559a31981e2c52301881b123a393037c80c3da1f8" Nov 21 19:14:19 crc kubenswrapper[4701]: I1121 19:14:19.239729 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-778h8" Nov 21 19:14:19 crc kubenswrapper[4701]: I1121 19:14:19.287681 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-778h8" Nov 21 19:14:20 crc kubenswrapper[4701]: I1121 19:14:20.558034 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-778h8"] Nov 21 19:14:20 crc kubenswrapper[4701]: I1121 19:14:20.936827 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-778h8" podUID="3b4c846c-d2bd-4fea-887f-644612eed4ce" containerName="registry-server" containerID="cri-o://9749c4f9cb6d9e57827f641557916771f997378e9ea9186c8bf302abdf4667e4" gracePeriod=2 Nov 21 19:14:21 crc kubenswrapper[4701]: I1121 19:14:21.670960 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-778h8" Nov 21 19:14:21 crc kubenswrapper[4701]: I1121 19:14:21.698853 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b4c846c-d2bd-4fea-887f-644612eed4ce-utilities\") pod \"3b4c846c-d2bd-4fea-887f-644612eed4ce\" (UID: \"3b4c846c-d2bd-4fea-887f-644612eed4ce\") " Nov 21 19:14:21 crc kubenswrapper[4701]: I1121 19:14:21.699079 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7bjpk\" (UniqueName: \"kubernetes.io/projected/3b4c846c-d2bd-4fea-887f-644612eed4ce-kube-api-access-7bjpk\") pod \"3b4c846c-d2bd-4fea-887f-644612eed4ce\" (UID: \"3b4c846c-d2bd-4fea-887f-644612eed4ce\") " Nov 21 19:14:21 crc kubenswrapper[4701]: I1121 19:14:21.699126 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b4c846c-d2bd-4fea-887f-644612eed4ce-catalog-content\") pod \"3b4c846c-d2bd-4fea-887f-644612eed4ce\" (UID: \"3b4c846c-d2bd-4fea-887f-644612eed4ce\") " Nov 21 19:14:21 crc kubenswrapper[4701]: I1121 19:14:21.700803 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3b4c846c-d2bd-4fea-887f-644612eed4ce-utilities" (OuterVolumeSpecName: "utilities") pod "3b4c846c-d2bd-4fea-887f-644612eed4ce" (UID: "3b4c846c-d2bd-4fea-887f-644612eed4ce"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:14:21 crc kubenswrapper[4701]: I1121 19:14:21.707247 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3b4c846c-d2bd-4fea-887f-644612eed4ce-kube-api-access-7bjpk" (OuterVolumeSpecName: "kube-api-access-7bjpk") pod "3b4c846c-d2bd-4fea-887f-644612eed4ce" (UID: "3b4c846c-d2bd-4fea-887f-644612eed4ce"). InnerVolumeSpecName "kube-api-access-7bjpk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:14:21 crc kubenswrapper[4701]: I1121 19:14:21.797443 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3b4c846c-d2bd-4fea-887f-644612eed4ce-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3b4c846c-d2bd-4fea-887f-644612eed4ce" (UID: "3b4c846c-d2bd-4fea-887f-644612eed4ce"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:14:21 crc kubenswrapper[4701]: I1121 19:14:21.804597 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7bjpk\" (UniqueName: \"kubernetes.io/projected/3b4c846c-d2bd-4fea-887f-644612eed4ce-kube-api-access-7bjpk\") on node \"crc\" DevicePath \"\"" Nov 21 19:14:21 crc kubenswrapper[4701]: I1121 19:14:21.804647 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b4c846c-d2bd-4fea-887f-644612eed4ce-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 19:14:21 crc kubenswrapper[4701]: I1121 19:14:21.804658 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b4c846c-d2bd-4fea-887f-644612eed4ce-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 19:14:21 crc kubenswrapper[4701]: I1121 19:14:21.901817 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-77x66"] Nov 21 19:14:21 crc kubenswrapper[4701]: I1121 19:14:21.947354 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerStarted","Data":"76a0edd10d4f17051fb2f677c020a3884e840a31cfe72eb7d10bdd5a1c9d63b1"} Nov 21 19:14:21 crc kubenswrapper[4701]: I1121 19:14:21.955351 4701 generic.go:334] "Generic (PLEG): container finished" podID="3b4c846c-d2bd-4fea-887f-644612eed4ce" containerID="9749c4f9cb6d9e57827f641557916771f997378e9ea9186c8bf302abdf4667e4" exitCode=0 Nov 21 19:14:21 crc kubenswrapper[4701]: I1121 19:14:21.955570 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-778h8" Nov 21 19:14:21 crc kubenswrapper[4701]: I1121 19:14:21.962858 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-5b89f66749-2c7l9" Nov 21 19:14:21 crc kubenswrapper[4701]: I1121 19:14:21.962903 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-778h8" event={"ID":"3b4c846c-d2bd-4fea-887f-644612eed4ce","Type":"ContainerDied","Data":"9749c4f9cb6d9e57827f641557916771f997378e9ea9186c8bf302abdf4667e4"} Nov 21 19:14:21 crc kubenswrapper[4701]: I1121 19:14:21.962937 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-778h8" event={"ID":"3b4c846c-d2bd-4fea-887f-644612eed4ce","Type":"ContainerDied","Data":"72a98d7051adca05191444199b36ab7bc752f7e227e3782314f0a7faecad0004"} Nov 21 19:14:21 crc kubenswrapper[4701]: I1121 19:14:21.962952 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-5b89f66749-2c7l9" event={"ID":"1e13b4b2-e1e0-4f12-b8a4-e364e57407b1","Type":"ContainerStarted","Data":"4cd9198bf6a51858e92473c76496bb0f330aeeb11a89fda134599a701d3aced1"} Nov 21 19:14:21 crc kubenswrapper[4701]: I1121 19:14:21.962977 4701 scope.go:117] "RemoveContainer" containerID="9749c4f9cb6d9e57827f641557916771f997378e9ea9186c8bf302abdf4667e4" Nov 21 19:14:21 crc kubenswrapper[4701]: I1121 19:14:21.966795 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-7fffd4c557-7pqsr" event={"ID":"394bdfbe-ca77-47ca-837c-68023e532b01","Type":"ContainerStarted","Data":"92c4b62bec4c92d6c4a94197f779bd64dbaa5281406788e13d1ebd2ec7218dab"} Nov 21 19:14:21 crc kubenswrapper[4701]: I1121 19:14:21.967439 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-7fffd4c557-7pqsr" Nov 21 19:14:21 crc kubenswrapper[4701]: I1121 19:14:21.970813 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-77x66" event={"ID":"cb21951f-0e5e-4984-8094-8539f637ad9b","Type":"ContainerStarted","Data":"a3f95ce0195f2e1afb056ee208bb352b5ab996771169b4f189689c31e380a9d7"} Nov 21 19:14:21 crc kubenswrapper[4701]: I1121 19:14:21.989548 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-5b89f66749-2c7l9" podStartSLOduration=2.585458212 podStartE2EDuration="8.989513238s" podCreationTimestamp="2025-11-21 19:14:13 +0000 UTC" firstStartedPulling="2025-11-21 19:14:14.889621607 +0000 UTC m=+745.674761634" lastFinishedPulling="2025-11-21 19:14:21.293676633 +0000 UTC m=+752.078816660" observedRunningTime="2025-11-21 19:14:21.983016411 +0000 UTC m=+752.768156468" watchObservedRunningTime="2025-11-21 19:14:21.989513238 +0000 UTC m=+752.774653265" Nov 21 19:14:21 crc kubenswrapper[4701]: I1121 19:14:21.991474 4701 scope.go:117] "RemoveContainer" containerID="2578fc4a4d1aa087bd700b67ca31d2bc7d72d78bc903cfc7053f99551d706710" Nov 21 19:14:22 crc kubenswrapper[4701]: I1121 19:14:22.019795 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-7fffd4c557-7pqsr" podStartSLOduration=1.712991956 podStartE2EDuration="8.019772547s" podCreationTimestamp="2025-11-21 19:14:14 +0000 UTC" firstStartedPulling="2025-11-21 19:14:14.99641111 +0000 UTC m=+745.781551137" 
lastFinishedPulling="2025-11-21 19:14:21.303191701 +0000 UTC m=+752.088331728" observedRunningTime="2025-11-21 19:14:22.016983872 +0000 UTC m=+752.802123899" watchObservedRunningTime="2025-11-21 19:14:22.019772547 +0000 UTC m=+752.804912574" Nov 21 19:14:22 crc kubenswrapper[4701]: I1121 19:14:22.021426 4701 scope.go:117] "RemoveContainer" containerID="2b5e3458501d8afb57998755c55dbe2635e8ad371cc50a923c785533576359f6" Nov 21 19:14:22 crc kubenswrapper[4701]: I1121 19:14:22.041454 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-778h8"] Nov 21 19:14:22 crc kubenswrapper[4701]: I1121 19:14:22.046929 4701 scope.go:117] "RemoveContainer" containerID="9749c4f9cb6d9e57827f641557916771f997378e9ea9186c8bf302abdf4667e4" Nov 21 19:14:22 crc kubenswrapper[4701]: E1121 19:14:22.047569 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9749c4f9cb6d9e57827f641557916771f997378e9ea9186c8bf302abdf4667e4\": container with ID starting with 9749c4f9cb6d9e57827f641557916771f997378e9ea9186c8bf302abdf4667e4 not found: ID does not exist" containerID="9749c4f9cb6d9e57827f641557916771f997378e9ea9186c8bf302abdf4667e4" Nov 21 19:14:22 crc kubenswrapper[4701]: I1121 19:14:22.047622 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9749c4f9cb6d9e57827f641557916771f997378e9ea9186c8bf302abdf4667e4"} err="failed to get container status \"9749c4f9cb6d9e57827f641557916771f997378e9ea9186c8bf302abdf4667e4\": rpc error: code = NotFound desc = could not find container \"9749c4f9cb6d9e57827f641557916771f997378e9ea9186c8bf302abdf4667e4\": container with ID starting with 9749c4f9cb6d9e57827f641557916771f997378e9ea9186c8bf302abdf4667e4 not found: ID does not exist" Nov 21 19:14:22 crc kubenswrapper[4701]: I1121 19:14:22.047654 4701 scope.go:117] "RemoveContainer" containerID="2578fc4a4d1aa087bd700b67ca31d2bc7d72d78bc903cfc7053f99551d706710" Nov 21 19:14:22 crc kubenswrapper[4701]: E1121 19:14:22.048120 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2578fc4a4d1aa087bd700b67ca31d2bc7d72d78bc903cfc7053f99551d706710\": container with ID starting with 2578fc4a4d1aa087bd700b67ca31d2bc7d72d78bc903cfc7053f99551d706710 not found: ID does not exist" containerID="2578fc4a4d1aa087bd700b67ca31d2bc7d72d78bc903cfc7053f99551d706710" Nov 21 19:14:22 crc kubenswrapper[4701]: I1121 19:14:22.048147 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2578fc4a4d1aa087bd700b67ca31d2bc7d72d78bc903cfc7053f99551d706710"} err="failed to get container status \"2578fc4a4d1aa087bd700b67ca31d2bc7d72d78bc903cfc7053f99551d706710\": rpc error: code = NotFound desc = could not find container \"2578fc4a4d1aa087bd700b67ca31d2bc7d72d78bc903cfc7053f99551d706710\": container with ID starting with 2578fc4a4d1aa087bd700b67ca31d2bc7d72d78bc903cfc7053f99551d706710 not found: ID does not exist" Nov 21 19:14:22 crc kubenswrapper[4701]: I1121 19:14:22.048163 4701 scope.go:117] "RemoveContainer" containerID="2b5e3458501d8afb57998755c55dbe2635e8ad371cc50a923c785533576359f6" Nov 21 19:14:22 crc kubenswrapper[4701]: E1121 19:14:22.048561 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2b5e3458501d8afb57998755c55dbe2635e8ad371cc50a923c785533576359f6\": container with ID starting with 
2b5e3458501d8afb57998755c55dbe2635e8ad371cc50a923c785533576359f6 not found: ID does not exist" containerID="2b5e3458501d8afb57998755c55dbe2635e8ad371cc50a923c785533576359f6" Nov 21 19:14:22 crc kubenswrapper[4701]: I1121 19:14:22.048629 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2b5e3458501d8afb57998755c55dbe2635e8ad371cc50a923c785533576359f6"} err="failed to get container status \"2b5e3458501d8afb57998755c55dbe2635e8ad371cc50a923c785533576359f6\": rpc error: code = NotFound desc = could not find container \"2b5e3458501d8afb57998755c55dbe2635e8ad371cc50a923c785533576359f6\": container with ID starting with 2b5e3458501d8afb57998755c55dbe2635e8ad371cc50a923c785533576359f6 not found: ID does not exist" Nov 21 19:14:22 crc kubenswrapper[4701]: I1121 19:14:22.048785 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-778h8"] Nov 21 19:14:22 crc kubenswrapper[4701]: I1121 19:14:22.981574 4701 generic.go:334] "Generic (PLEG): container finished" podID="cb21951f-0e5e-4984-8094-8539f637ad9b" containerID="c383208b08f213888d6b1bbfb4bad142e57fde49b7ee605adef43d80583ba541" exitCode=0 Nov 21 19:14:22 crc kubenswrapper[4701]: I1121 19:14:22.981755 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-77x66" event={"ID":"cb21951f-0e5e-4984-8094-8539f637ad9b","Type":"ContainerDied","Data":"c383208b08f213888d6b1bbfb4bad142e57fde49b7ee605adef43d80583ba541"} Nov 21 19:14:23 crc kubenswrapper[4701]: I1121 19:14:23.963854 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3b4c846c-d2bd-4fea-887f-644612eed4ce" path="/var/lib/kubelet/pods/3b4c846c-d2bd-4fea-887f-644612eed4ce/volumes" Nov 21 19:14:24 crc kubenswrapper[4701]: I1121 19:14:24.001692 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-77x66" event={"ID":"cb21951f-0e5e-4984-8094-8539f637ad9b","Type":"ContainerStarted","Data":"6fff4009298b0415580bf71e870f96735cfe0fd5c0267ed8c8d9398c3db568b0"} Nov 21 19:14:25 crc kubenswrapper[4701]: I1121 19:14:25.024068 4701 generic.go:334] "Generic (PLEG): container finished" podID="cb21951f-0e5e-4984-8094-8539f637ad9b" containerID="6fff4009298b0415580bf71e870f96735cfe0fd5c0267ed8c8d9398c3db568b0" exitCode=0 Nov 21 19:14:25 crc kubenswrapper[4701]: I1121 19:14:25.024226 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-77x66" event={"ID":"cb21951f-0e5e-4984-8094-8539f637ad9b","Type":"ContainerDied","Data":"6fff4009298b0415580bf71e870f96735cfe0fd5c0267ed8c8d9398c3db568b0"} Nov 21 19:14:26 crc kubenswrapper[4701]: I1121 19:14:26.054992 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-77x66" event={"ID":"cb21951f-0e5e-4984-8094-8539f637ad9b","Type":"ContainerStarted","Data":"ff0646a7e42236000f4809a1de8a472e4ec95adae01bc37fade6f95dca7f24ff"} Nov 21 19:14:26 crc kubenswrapper[4701]: I1121 19:14:26.102461 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-77x66" podStartSLOduration=5.688487445 podStartE2EDuration="8.102443113s" podCreationTimestamp="2025-11-21 19:14:18 +0000 UTC" firstStartedPulling="2025-11-21 19:14:22.986843742 +0000 UTC m=+753.771983779" lastFinishedPulling="2025-11-21 19:14:25.40079938 +0000 UTC m=+756.185939447" observedRunningTime="2025-11-21 19:14:26.097855208 +0000 UTC m=+756.882995235" 
watchObservedRunningTime="2025-11-21 19:14:26.102443113 +0000 UTC m=+756.887583140" Nov 21 19:14:28 crc kubenswrapper[4701]: I1121 19:14:28.731189 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-77x66" Nov 21 19:14:28 crc kubenswrapper[4701]: I1121 19:14:28.731277 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-77x66" Nov 21 19:14:28 crc kubenswrapper[4701]: I1121 19:14:28.788803 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-77x66" Nov 21 19:14:34 crc kubenswrapper[4701]: I1121 19:14:34.469541 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-7fffd4c557-7pqsr" Nov 21 19:14:38 crc kubenswrapper[4701]: I1121 19:14:38.809378 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-77x66" Nov 21 19:14:41 crc kubenswrapper[4701]: I1121 19:14:41.164322 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-77x66"] Nov 21 19:14:41 crc kubenswrapper[4701]: I1121 19:14:41.165390 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-77x66" podUID="cb21951f-0e5e-4984-8094-8539f637ad9b" containerName="registry-server" containerID="cri-o://ff0646a7e42236000f4809a1de8a472e4ec95adae01bc37fade6f95dca7f24ff" gracePeriod=2 Nov 21 19:14:41 crc kubenswrapper[4701]: I1121 19:14:41.636003 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-77x66" Nov 21 19:14:41 crc kubenswrapper[4701]: I1121 19:14:41.750127 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb21951f-0e5e-4984-8094-8539f637ad9b-catalog-content\") pod \"cb21951f-0e5e-4984-8094-8539f637ad9b\" (UID: \"cb21951f-0e5e-4984-8094-8539f637ad9b\") " Nov 21 19:14:41 crc kubenswrapper[4701]: I1121 19:14:41.750326 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ctxb7\" (UniqueName: \"kubernetes.io/projected/cb21951f-0e5e-4984-8094-8539f637ad9b-kube-api-access-ctxb7\") pod \"cb21951f-0e5e-4984-8094-8539f637ad9b\" (UID: \"cb21951f-0e5e-4984-8094-8539f637ad9b\") " Nov 21 19:14:41 crc kubenswrapper[4701]: I1121 19:14:41.750391 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb21951f-0e5e-4984-8094-8539f637ad9b-utilities\") pod \"cb21951f-0e5e-4984-8094-8539f637ad9b\" (UID: \"cb21951f-0e5e-4984-8094-8539f637ad9b\") " Nov 21 19:14:41 crc kubenswrapper[4701]: I1121 19:14:41.751699 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cb21951f-0e5e-4984-8094-8539f637ad9b-utilities" (OuterVolumeSpecName: "utilities") pod "cb21951f-0e5e-4984-8094-8539f637ad9b" (UID: "cb21951f-0e5e-4984-8094-8539f637ad9b"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:14:41 crc kubenswrapper[4701]: I1121 19:14:41.760462 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb21951f-0e5e-4984-8094-8539f637ad9b-kube-api-access-ctxb7" (OuterVolumeSpecName: "kube-api-access-ctxb7") pod "cb21951f-0e5e-4984-8094-8539f637ad9b" (UID: "cb21951f-0e5e-4984-8094-8539f637ad9b"). InnerVolumeSpecName "kube-api-access-ctxb7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:14:41 crc kubenswrapper[4701]: I1121 19:14:41.810792 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cb21951f-0e5e-4984-8094-8539f637ad9b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cb21951f-0e5e-4984-8094-8539f637ad9b" (UID: "cb21951f-0e5e-4984-8094-8539f637ad9b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:14:41 crc kubenswrapper[4701]: I1121 19:14:41.852653 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb21951f-0e5e-4984-8094-8539f637ad9b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 19:14:41 crc kubenswrapper[4701]: I1121 19:14:41.852715 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ctxb7\" (UniqueName: \"kubernetes.io/projected/cb21951f-0e5e-4984-8094-8539f637ad9b-kube-api-access-ctxb7\") on node \"crc\" DevicePath \"\"" Nov 21 19:14:41 crc kubenswrapper[4701]: I1121 19:14:41.852742 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb21951f-0e5e-4984-8094-8539f637ad9b-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 19:14:42 crc kubenswrapper[4701]: I1121 19:14:42.185443 4701 generic.go:334] "Generic (PLEG): container finished" podID="cb21951f-0e5e-4984-8094-8539f637ad9b" containerID="ff0646a7e42236000f4809a1de8a472e4ec95adae01bc37fade6f95dca7f24ff" exitCode=0 Nov 21 19:14:42 crc kubenswrapper[4701]: I1121 19:14:42.185496 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-77x66" event={"ID":"cb21951f-0e5e-4984-8094-8539f637ad9b","Type":"ContainerDied","Data":"ff0646a7e42236000f4809a1de8a472e4ec95adae01bc37fade6f95dca7f24ff"} Nov 21 19:14:42 crc kubenswrapper[4701]: I1121 19:14:42.185536 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-77x66" event={"ID":"cb21951f-0e5e-4984-8094-8539f637ad9b","Type":"ContainerDied","Data":"a3f95ce0195f2e1afb056ee208bb352b5ab996771169b4f189689c31e380a9d7"} Nov 21 19:14:42 crc kubenswrapper[4701]: I1121 19:14:42.185561 4701 scope.go:117] "RemoveContainer" containerID="ff0646a7e42236000f4809a1de8a472e4ec95adae01bc37fade6f95dca7f24ff" Nov 21 19:14:42 crc kubenswrapper[4701]: I1121 19:14:42.187906 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-77x66" Nov 21 19:14:42 crc kubenswrapper[4701]: I1121 19:14:42.218002 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-77x66"] Nov 21 19:14:42 crc kubenswrapper[4701]: I1121 19:14:42.218450 4701 scope.go:117] "RemoveContainer" containerID="6fff4009298b0415580bf71e870f96735cfe0fd5c0267ed8c8d9398c3db568b0" Nov 21 19:14:42 crc kubenswrapper[4701]: I1121 19:14:42.223788 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-77x66"] Nov 21 19:14:42 crc kubenswrapper[4701]: I1121 19:14:42.245261 4701 scope.go:117] "RemoveContainer" containerID="c383208b08f213888d6b1bbfb4bad142e57fde49b7ee605adef43d80583ba541" Nov 21 19:14:42 crc kubenswrapper[4701]: I1121 19:14:42.281907 4701 scope.go:117] "RemoveContainer" containerID="ff0646a7e42236000f4809a1de8a472e4ec95adae01bc37fade6f95dca7f24ff" Nov 21 19:14:42 crc kubenswrapper[4701]: E1121 19:14:42.284042 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff0646a7e42236000f4809a1de8a472e4ec95adae01bc37fade6f95dca7f24ff\": container with ID starting with ff0646a7e42236000f4809a1de8a472e4ec95adae01bc37fade6f95dca7f24ff not found: ID does not exist" containerID="ff0646a7e42236000f4809a1de8a472e4ec95adae01bc37fade6f95dca7f24ff" Nov 21 19:14:42 crc kubenswrapper[4701]: I1121 19:14:42.284189 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff0646a7e42236000f4809a1de8a472e4ec95adae01bc37fade6f95dca7f24ff"} err="failed to get container status \"ff0646a7e42236000f4809a1de8a472e4ec95adae01bc37fade6f95dca7f24ff\": rpc error: code = NotFound desc = could not find container \"ff0646a7e42236000f4809a1de8a472e4ec95adae01bc37fade6f95dca7f24ff\": container with ID starting with ff0646a7e42236000f4809a1de8a472e4ec95adae01bc37fade6f95dca7f24ff not found: ID does not exist" Nov 21 19:14:42 crc kubenswrapper[4701]: I1121 19:14:42.284288 4701 scope.go:117] "RemoveContainer" containerID="6fff4009298b0415580bf71e870f96735cfe0fd5c0267ed8c8d9398c3db568b0" Nov 21 19:14:42 crc kubenswrapper[4701]: E1121 19:14:42.285109 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6fff4009298b0415580bf71e870f96735cfe0fd5c0267ed8c8d9398c3db568b0\": container with ID starting with 6fff4009298b0415580bf71e870f96735cfe0fd5c0267ed8c8d9398c3db568b0 not found: ID does not exist" containerID="6fff4009298b0415580bf71e870f96735cfe0fd5c0267ed8c8d9398c3db568b0" Nov 21 19:14:42 crc kubenswrapper[4701]: I1121 19:14:42.285159 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6fff4009298b0415580bf71e870f96735cfe0fd5c0267ed8c8d9398c3db568b0"} err="failed to get container status \"6fff4009298b0415580bf71e870f96735cfe0fd5c0267ed8c8d9398c3db568b0\": rpc error: code = NotFound desc = could not find container \"6fff4009298b0415580bf71e870f96735cfe0fd5c0267ed8c8d9398c3db568b0\": container with ID starting with 6fff4009298b0415580bf71e870f96735cfe0fd5c0267ed8c8d9398c3db568b0 not found: ID does not exist" Nov 21 19:14:42 crc kubenswrapper[4701]: I1121 19:14:42.285197 4701 scope.go:117] "RemoveContainer" containerID="c383208b08f213888d6b1bbfb4bad142e57fde49b7ee605adef43d80583ba541" Nov 21 19:14:42 crc kubenswrapper[4701]: E1121 19:14:42.285779 4701 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"c383208b08f213888d6b1bbfb4bad142e57fde49b7ee605adef43d80583ba541\": container with ID starting with c383208b08f213888d6b1bbfb4bad142e57fde49b7ee605adef43d80583ba541 not found: ID does not exist" containerID="c383208b08f213888d6b1bbfb4bad142e57fde49b7ee605adef43d80583ba541" Nov 21 19:14:42 crc kubenswrapper[4701]: I1121 19:14:42.285854 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c383208b08f213888d6b1bbfb4bad142e57fde49b7ee605adef43d80583ba541"} err="failed to get container status \"c383208b08f213888d6b1bbfb4bad142e57fde49b7ee605adef43d80583ba541\": rpc error: code = NotFound desc = could not find container \"c383208b08f213888d6b1bbfb4bad142e57fde49b7ee605adef43d80583ba541\": container with ID starting with c383208b08f213888d6b1bbfb4bad142e57fde49b7ee605adef43d80583ba541 not found: ID does not exist" Nov 21 19:14:43 crc kubenswrapper[4701]: I1121 19:14:43.968538 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb21951f-0e5e-4984-8094-8539f637ad9b" path="/var/lib/kubelet/pods/cb21951f-0e5e-4984-8094-8539f637ad9b/volumes" Nov 21 19:14:54 crc kubenswrapper[4701]: I1121 19:14:54.190860 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-5b89f66749-2c7l9" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.065546 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-9ssmb"] Nov 21 19:14:55 crc kubenswrapper[4701]: E1121 19:14:55.066001 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb21951f-0e5e-4984-8094-8539f637ad9b" containerName="extract-utilities" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.066022 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb21951f-0e5e-4984-8094-8539f637ad9b" containerName="extract-utilities" Nov 21 19:14:55 crc kubenswrapper[4701]: E1121 19:14:55.066034 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb21951f-0e5e-4984-8094-8539f637ad9b" containerName="extract-content" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.066042 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb21951f-0e5e-4984-8094-8539f637ad9b" containerName="extract-content" Nov 21 19:14:55 crc kubenswrapper[4701]: E1121 19:14:55.066056 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb21951f-0e5e-4984-8094-8539f637ad9b" containerName="registry-server" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.066069 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb21951f-0e5e-4984-8094-8539f637ad9b" containerName="registry-server" Nov 21 19:14:55 crc kubenswrapper[4701]: E1121 19:14:55.066111 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b4c846c-d2bd-4fea-887f-644612eed4ce" containerName="registry-server" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.066122 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b4c846c-d2bd-4fea-887f-644612eed4ce" containerName="registry-server" Nov 21 19:14:55 crc kubenswrapper[4701]: E1121 19:14:55.066148 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b4c846c-d2bd-4fea-887f-644612eed4ce" containerName="extract-content" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.066157 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b4c846c-d2bd-4fea-887f-644612eed4ce" containerName="extract-content" Nov 21 
19:14:55 crc kubenswrapper[4701]: E1121 19:14:55.066168 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b4c846c-d2bd-4fea-887f-644612eed4ce" containerName="extract-utilities" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.066176 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b4c846c-d2bd-4fea-887f-644612eed4ce" containerName="extract-utilities" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.066361 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb21951f-0e5e-4984-8094-8539f637ad9b" containerName="registry-server" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.066378 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="3b4c846c-d2bd-4fea-887f-644612eed4ce" containerName="registry-server" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.069492 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-9ssmb" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.069961 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-4zbkd"] Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.071116 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-4zbkd" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.075103 4701 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.075356 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.076668 4701 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.079648 4701 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-b7rwb" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.085371 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-4zbkd"] Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.163759 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-hmzkz"] Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.165170 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-hmzkz" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.168684 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.168862 4701 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.169021 4701 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-dzzzb" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.169146 4701 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.182171 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6c7b4b5f48-htbbv"] Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.191792 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-htbbv" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.195580 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/5a5f9d84-176f-4592-b663-03adffd0073f-reloader\") pod \"frr-k8s-9ssmb\" (UID: \"5a5f9d84-176f-4592-b663-03adffd0073f\") " pod="metallb-system/frr-k8s-9ssmb" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.195868 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5a5f9d84-176f-4592-b663-03adffd0073f-metrics-certs\") pod \"frr-k8s-9ssmb\" (UID: \"5a5f9d84-176f-4592-b663-03adffd0073f\") " pod="metallb-system/frr-k8s-9ssmb" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.195962 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/5a5f9d84-176f-4592-b663-03adffd0073f-frr-sockets\") pod \"frr-k8s-9ssmb\" (UID: \"5a5f9d84-176f-4592-b663-03adffd0073f\") " pod="metallb-system/frr-k8s-9ssmb" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.196110 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/dc0b171f-3d9f-41b9-914b-ca723de8416f-cert\") pod \"frr-k8s-webhook-server-6998585d5-4zbkd\" (UID: \"dc0b171f-3d9f-41b9-914b-ca723de8416f\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-4zbkd" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.196157 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/5a5f9d84-176f-4592-b663-03adffd0073f-frr-conf\") pod \"frr-k8s-9ssmb\" (UID: \"5a5f9d84-176f-4592-b663-03adffd0073f\") " pod="metallb-system/frr-k8s-9ssmb" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.196182 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/5a5f9d84-176f-4592-b663-03adffd0073f-metrics\") pod \"frr-k8s-9ssmb\" (UID: \"5a5f9d84-176f-4592-b663-03adffd0073f\") " pod="metallb-system/frr-k8s-9ssmb" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.196389 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-456b8\" (UniqueName: \"kubernetes.io/projected/5a5f9d84-176f-4592-b663-03adffd0073f-kube-api-access-456b8\") pod \"frr-k8s-9ssmb\" (UID: \"5a5f9d84-176f-4592-b663-03adffd0073f\") " pod="metallb-system/frr-k8s-9ssmb" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.196630 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-74mp2\" (UniqueName: \"kubernetes.io/projected/dc0b171f-3d9f-41b9-914b-ca723de8416f-kube-api-access-74mp2\") pod \"frr-k8s-webhook-server-6998585d5-4zbkd\" (UID: \"dc0b171f-3d9f-41b9-914b-ca723de8416f\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-4zbkd" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.196759 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/5a5f9d84-176f-4592-b663-03adffd0073f-frr-startup\") pod \"frr-k8s-9ssmb\" (UID: \"5a5f9d84-176f-4592-b663-03adffd0073f\") " 
pod="metallb-system/frr-k8s-9ssmb" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.196855 4701 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.210556 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-htbbv"] Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.298884 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5a5f9d84-176f-4592-b663-03adffd0073f-metrics-certs\") pod \"frr-k8s-9ssmb\" (UID: \"5a5f9d84-176f-4592-b663-03adffd0073f\") " pod="metallb-system/frr-k8s-9ssmb" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.298962 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kfmmj\" (UniqueName: \"kubernetes.io/projected/f0e3f5ca-8ea5-40a2-b362-49c12a9c2f0c-kube-api-access-kfmmj\") pod \"controller-6c7b4b5f48-htbbv\" (UID: \"f0e3f5ca-8ea5-40a2-b362-49c12a9c2f0c\") " pod="metallb-system/controller-6c7b4b5f48-htbbv" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.298999 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/5a5f9d84-176f-4592-b663-03adffd0073f-frr-sockets\") pod \"frr-k8s-9ssmb\" (UID: \"5a5f9d84-176f-4592-b663-03adffd0073f\") " pod="metallb-system/frr-k8s-9ssmb" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.299082 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/fa17db1f-33dc-4f0e-a191-7a01d67c575d-metallb-excludel2\") pod \"speaker-hmzkz\" (UID: \"fa17db1f-33dc-4f0e-a191-7a01d67c575d\") " pod="metallb-system/speaker-hmzkz" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.299125 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/dc0b171f-3d9f-41b9-914b-ca723de8416f-cert\") pod \"frr-k8s-webhook-server-6998585d5-4zbkd\" (UID: \"dc0b171f-3d9f-41b9-914b-ca723de8416f\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-4zbkd" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.299156 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f0e3f5ca-8ea5-40a2-b362-49c12a9c2f0c-cert\") pod \"controller-6c7b4b5f48-htbbv\" (UID: \"f0e3f5ca-8ea5-40a2-b362-49c12a9c2f0c\") " pod="metallb-system/controller-6c7b4b5f48-htbbv" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.299182 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fa17db1f-33dc-4f0e-a191-7a01d67c575d-metrics-certs\") pod \"speaker-hmzkz\" (UID: \"fa17db1f-33dc-4f0e-a191-7a01d67c575d\") " pod="metallb-system/speaker-hmzkz" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.299231 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/5a5f9d84-176f-4592-b663-03adffd0073f-frr-conf\") pod \"frr-k8s-9ssmb\" (UID: \"5a5f9d84-176f-4592-b663-03adffd0073f\") " pod="metallb-system/frr-k8s-9ssmb" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.299321 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/5a5f9d84-176f-4592-b663-03adffd0073f-metrics\") pod \"frr-k8s-9ssmb\" (UID: \"5a5f9d84-176f-4592-b663-03adffd0073f\") " pod="metallb-system/frr-k8s-9ssmb" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.299399 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p7wh7\" (UniqueName: \"kubernetes.io/projected/fa17db1f-33dc-4f0e-a191-7a01d67c575d-kube-api-access-p7wh7\") pod \"speaker-hmzkz\" (UID: \"fa17db1f-33dc-4f0e-a191-7a01d67c575d\") " pod="metallb-system/speaker-hmzkz" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.299456 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-456b8\" (UniqueName: \"kubernetes.io/projected/5a5f9d84-176f-4592-b663-03adffd0073f-kube-api-access-456b8\") pod \"frr-k8s-9ssmb\" (UID: \"5a5f9d84-176f-4592-b663-03adffd0073f\") " pod="metallb-system/frr-k8s-9ssmb" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.299530 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/5a5f9d84-176f-4592-b663-03adffd0073f-frr-sockets\") pod \"frr-k8s-9ssmb\" (UID: \"5a5f9d84-176f-4592-b663-03adffd0073f\") " pod="metallb-system/frr-k8s-9ssmb" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.299561 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-74mp2\" (UniqueName: \"kubernetes.io/projected/dc0b171f-3d9f-41b9-914b-ca723de8416f-kube-api-access-74mp2\") pod \"frr-k8s-webhook-server-6998585d5-4zbkd\" (UID: \"dc0b171f-3d9f-41b9-914b-ca723de8416f\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-4zbkd" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.299650 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/5a5f9d84-176f-4592-b663-03adffd0073f-frr-startup\") pod \"frr-k8s-9ssmb\" (UID: \"5a5f9d84-176f-4592-b663-03adffd0073f\") " pod="metallb-system/frr-k8s-9ssmb" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.299730 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f0e3f5ca-8ea5-40a2-b362-49c12a9c2f0c-metrics-certs\") pod \"controller-6c7b4b5f48-htbbv\" (UID: \"f0e3f5ca-8ea5-40a2-b362-49c12a9c2f0c\") " pod="metallb-system/controller-6c7b4b5f48-htbbv" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.299826 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/5a5f9d84-176f-4592-b663-03adffd0073f-frr-conf\") pod \"frr-k8s-9ssmb\" (UID: \"5a5f9d84-176f-4592-b663-03adffd0073f\") " pod="metallb-system/frr-k8s-9ssmb" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.299882 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/5a5f9d84-176f-4592-b663-03adffd0073f-metrics\") pod \"frr-k8s-9ssmb\" (UID: \"5a5f9d84-176f-4592-b663-03adffd0073f\") " pod="metallb-system/frr-k8s-9ssmb" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.299878 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/5a5f9d84-176f-4592-b663-03adffd0073f-reloader\") pod \"frr-k8s-9ssmb\" (UID: \"5a5f9d84-176f-4592-b663-03adffd0073f\") " 
pod="metallb-system/frr-k8s-9ssmb" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.300061 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/fa17db1f-33dc-4f0e-a191-7a01d67c575d-memberlist\") pod \"speaker-hmzkz\" (UID: \"fa17db1f-33dc-4f0e-a191-7a01d67c575d\") " pod="metallb-system/speaker-hmzkz" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.300241 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/5a5f9d84-176f-4592-b663-03adffd0073f-reloader\") pod \"frr-k8s-9ssmb\" (UID: \"5a5f9d84-176f-4592-b663-03adffd0073f\") " pod="metallb-system/frr-k8s-9ssmb" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.300720 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/5a5f9d84-176f-4592-b663-03adffd0073f-frr-startup\") pod \"frr-k8s-9ssmb\" (UID: \"5a5f9d84-176f-4592-b663-03adffd0073f\") " pod="metallb-system/frr-k8s-9ssmb" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.306656 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/dc0b171f-3d9f-41b9-914b-ca723de8416f-cert\") pod \"frr-k8s-webhook-server-6998585d5-4zbkd\" (UID: \"dc0b171f-3d9f-41b9-914b-ca723de8416f\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-4zbkd" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.306757 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5a5f9d84-176f-4592-b663-03adffd0073f-metrics-certs\") pod \"frr-k8s-9ssmb\" (UID: \"5a5f9d84-176f-4592-b663-03adffd0073f\") " pod="metallb-system/frr-k8s-9ssmb" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.314813 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-456b8\" (UniqueName: \"kubernetes.io/projected/5a5f9d84-176f-4592-b663-03adffd0073f-kube-api-access-456b8\") pod \"frr-k8s-9ssmb\" (UID: \"5a5f9d84-176f-4592-b663-03adffd0073f\") " pod="metallb-system/frr-k8s-9ssmb" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.317091 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-74mp2\" (UniqueName: \"kubernetes.io/projected/dc0b171f-3d9f-41b9-914b-ca723de8416f-kube-api-access-74mp2\") pod \"frr-k8s-webhook-server-6998585d5-4zbkd\" (UID: \"dc0b171f-3d9f-41b9-914b-ca723de8416f\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-4zbkd" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.387429 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-9ssmb" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.394361 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-4zbkd" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.401937 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/fa17db1f-33dc-4f0e-a191-7a01d67c575d-metallb-excludel2\") pod \"speaker-hmzkz\" (UID: \"fa17db1f-33dc-4f0e-a191-7a01d67c575d\") " pod="metallb-system/speaker-hmzkz" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.402009 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f0e3f5ca-8ea5-40a2-b362-49c12a9c2f0c-cert\") pod \"controller-6c7b4b5f48-htbbv\" (UID: \"f0e3f5ca-8ea5-40a2-b362-49c12a9c2f0c\") " pod="metallb-system/controller-6c7b4b5f48-htbbv" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.402058 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fa17db1f-33dc-4f0e-a191-7a01d67c575d-metrics-certs\") pod \"speaker-hmzkz\" (UID: \"fa17db1f-33dc-4f0e-a191-7a01d67c575d\") " pod="metallb-system/speaker-hmzkz" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.402091 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p7wh7\" (UniqueName: \"kubernetes.io/projected/fa17db1f-33dc-4f0e-a191-7a01d67c575d-kube-api-access-p7wh7\") pod \"speaker-hmzkz\" (UID: \"fa17db1f-33dc-4f0e-a191-7a01d67c575d\") " pod="metallb-system/speaker-hmzkz" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.402150 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f0e3f5ca-8ea5-40a2-b362-49c12a9c2f0c-metrics-certs\") pod \"controller-6c7b4b5f48-htbbv\" (UID: \"f0e3f5ca-8ea5-40a2-b362-49c12a9c2f0c\") " pod="metallb-system/controller-6c7b4b5f48-htbbv" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.402228 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/fa17db1f-33dc-4f0e-a191-7a01d67c575d-memberlist\") pod \"speaker-hmzkz\" (UID: \"fa17db1f-33dc-4f0e-a191-7a01d67c575d\") " pod="metallb-system/speaker-hmzkz" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.402274 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kfmmj\" (UniqueName: \"kubernetes.io/projected/f0e3f5ca-8ea5-40a2-b362-49c12a9c2f0c-kube-api-access-kfmmj\") pod \"controller-6c7b4b5f48-htbbv\" (UID: \"f0e3f5ca-8ea5-40a2-b362-49c12a9c2f0c\") " pod="metallb-system/controller-6c7b4b5f48-htbbv" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.402731 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/fa17db1f-33dc-4f0e-a191-7a01d67c575d-metallb-excludel2\") pod \"speaker-hmzkz\" (UID: \"fa17db1f-33dc-4f0e-a191-7a01d67c575d\") " pod="metallb-system/speaker-hmzkz" Nov 21 19:14:55 crc kubenswrapper[4701]: E1121 19:14:55.403559 4701 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 21 19:14:55 crc kubenswrapper[4701]: E1121 19:14:55.403651 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fa17db1f-33dc-4f0e-a191-7a01d67c575d-memberlist podName:fa17db1f-33dc-4f0e-a191-7a01d67c575d nodeName:}" failed. 
No retries permitted until 2025-11-21 19:14:55.903627959 +0000 UTC m=+786.688767986 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/fa17db1f-33dc-4f0e-a191-7a01d67c575d-memberlist") pod "speaker-hmzkz" (UID: "fa17db1f-33dc-4f0e-a191-7a01d67c575d") : secret "metallb-memberlist" not found Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.407481 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fa17db1f-33dc-4f0e-a191-7a01d67c575d-metrics-certs\") pod \"speaker-hmzkz\" (UID: \"fa17db1f-33dc-4f0e-a191-7a01d67c575d\") " pod="metallb-system/speaker-hmzkz" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.407764 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f0e3f5ca-8ea5-40a2-b362-49c12a9c2f0c-metrics-certs\") pod \"controller-6c7b4b5f48-htbbv\" (UID: \"f0e3f5ca-8ea5-40a2-b362-49c12a9c2f0c\") " pod="metallb-system/controller-6c7b4b5f48-htbbv" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.408410 4701 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.423320 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kfmmj\" (UniqueName: \"kubernetes.io/projected/f0e3f5ca-8ea5-40a2-b362-49c12a9c2f0c-kube-api-access-kfmmj\") pod \"controller-6c7b4b5f48-htbbv\" (UID: \"f0e3f5ca-8ea5-40a2-b362-49c12a9c2f0c\") " pod="metallb-system/controller-6c7b4b5f48-htbbv" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.423835 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f0e3f5ca-8ea5-40a2-b362-49c12a9c2f0c-cert\") pod \"controller-6c7b4b5f48-htbbv\" (UID: \"f0e3f5ca-8ea5-40a2-b362-49c12a9c2f0c\") " pod="metallb-system/controller-6c7b4b5f48-htbbv" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.434000 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p7wh7\" (UniqueName: \"kubernetes.io/projected/fa17db1f-33dc-4f0e-a191-7a01d67c575d-kube-api-access-p7wh7\") pod \"speaker-hmzkz\" (UID: \"fa17db1f-33dc-4f0e-a191-7a01d67c575d\") " pod="metallb-system/speaker-hmzkz" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.505234 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-htbbv" Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.811931 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-4zbkd"] Nov 21 19:14:55 crc kubenswrapper[4701]: W1121 19:14:55.819964 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddc0b171f_3d9f_41b9_914b_ca723de8416f.slice/crio-f67379bfce5d9daa1fd9f3ac0762889be20b94515eab1364de8059204deabfd5 WatchSource:0}: Error finding container f67379bfce5d9daa1fd9f3ac0762889be20b94515eab1364de8059204deabfd5: Status 404 returned error can't find the container with id f67379bfce5d9daa1fd9f3ac0762889be20b94515eab1364de8059204deabfd5 Nov 21 19:14:55 crc kubenswrapper[4701]: I1121 19:14:55.911002 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/fa17db1f-33dc-4f0e-a191-7a01d67c575d-memberlist\") pod \"speaker-hmzkz\" (UID: \"fa17db1f-33dc-4f0e-a191-7a01d67c575d\") " pod="metallb-system/speaker-hmzkz" Nov 21 19:14:55 crc kubenswrapper[4701]: E1121 19:14:55.911309 4701 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 21 19:14:55 crc kubenswrapper[4701]: E1121 19:14:55.911460 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fa17db1f-33dc-4f0e-a191-7a01d67c575d-memberlist podName:fa17db1f-33dc-4f0e-a191-7a01d67c575d nodeName:}" failed. No retries permitted until 2025-11-21 19:14:56.911419379 +0000 UTC m=+787.696559426 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/fa17db1f-33dc-4f0e-a191-7a01d67c575d-memberlist") pod "speaker-hmzkz" (UID: "fa17db1f-33dc-4f0e-a191-7a01d67c575d") : secret "metallb-memberlist" not found Nov 21 19:14:56 crc kubenswrapper[4701]: I1121 19:14:56.018766 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-htbbv"] Nov 21 19:14:56 crc kubenswrapper[4701]: W1121 19:14:56.027553 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf0e3f5ca_8ea5_40a2_b362_49c12a9c2f0c.slice/crio-c65019834afe914b80352a04b496220815b3edf0bcdd1d9453ec0f5704d934c6 WatchSource:0}: Error finding container c65019834afe914b80352a04b496220815b3edf0bcdd1d9453ec0f5704d934c6: Status 404 returned error can't find the container with id c65019834afe914b80352a04b496220815b3edf0bcdd1d9453ec0f5704d934c6 Nov 21 19:14:56 crc kubenswrapper[4701]: I1121 19:14:56.326675 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9ssmb" event={"ID":"5a5f9d84-176f-4592-b663-03adffd0073f","Type":"ContainerStarted","Data":"dfedfb3ff397e72f363e75e7d26281cbe0b0269200acc65453fa28c0d412dca9"} Nov 21 19:14:56 crc kubenswrapper[4701]: I1121 19:14:56.328958 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-htbbv" event={"ID":"f0e3f5ca-8ea5-40a2-b362-49c12a9c2f0c","Type":"ContainerStarted","Data":"75a990a5f0f60b7eb9dacdd173e60d194ca2fb94e5e12314a4c02bcc60495c65"} Nov 21 19:14:56 crc kubenswrapper[4701]: I1121 19:14:56.328988 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-htbbv" 
event={"ID":"f0e3f5ca-8ea5-40a2-b362-49c12a9c2f0c","Type":"ContainerStarted","Data":"c65019834afe914b80352a04b496220815b3edf0bcdd1d9453ec0f5704d934c6"} Nov 21 19:14:56 crc kubenswrapper[4701]: I1121 19:14:56.330052 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-4zbkd" event={"ID":"dc0b171f-3d9f-41b9-914b-ca723de8416f","Type":"ContainerStarted","Data":"f67379bfce5d9daa1fd9f3ac0762889be20b94515eab1364de8059204deabfd5"} Nov 21 19:14:56 crc kubenswrapper[4701]: I1121 19:14:56.926428 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/fa17db1f-33dc-4f0e-a191-7a01d67c575d-memberlist\") pod \"speaker-hmzkz\" (UID: \"fa17db1f-33dc-4f0e-a191-7a01d67c575d\") " pod="metallb-system/speaker-hmzkz" Nov 21 19:14:56 crc kubenswrapper[4701]: I1121 19:14:56.931989 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/fa17db1f-33dc-4f0e-a191-7a01d67c575d-memberlist\") pod \"speaker-hmzkz\" (UID: \"fa17db1f-33dc-4f0e-a191-7a01d67c575d\") " pod="metallb-system/speaker-hmzkz" Nov 21 19:14:56 crc kubenswrapper[4701]: I1121 19:14:56.979182 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-hmzkz" Nov 21 19:14:57 crc kubenswrapper[4701]: I1121 19:14:57.339227 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-hmzkz" event={"ID":"fa17db1f-33dc-4f0e-a191-7a01d67c575d","Type":"ContainerStarted","Data":"b5314d4a2201abb9f271f7aef75a9b5408aeed182c132a700bc32bde574ec48b"} Nov 21 19:14:57 crc kubenswrapper[4701]: I1121 19:14:57.341295 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-htbbv" event={"ID":"f0e3f5ca-8ea5-40a2-b362-49c12a9c2f0c","Type":"ContainerStarted","Data":"09e519bbbc7ba12ebbf166c875a603a51383e931863f77622f8ff81f559141a3"} Nov 21 19:14:57 crc kubenswrapper[4701]: I1121 19:14:57.342156 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6c7b4b5f48-htbbv" Nov 21 19:14:57 crc kubenswrapper[4701]: I1121 19:14:57.367600 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6c7b4b5f48-htbbv" podStartSLOduration=2.367585595 podStartE2EDuration="2.367585595s" podCreationTimestamp="2025-11-21 19:14:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:14:57.362455016 +0000 UTC m=+788.147595043" watchObservedRunningTime="2025-11-21 19:14:57.367585595 +0000 UTC m=+788.152725622" Nov 21 19:14:58 crc kubenswrapper[4701]: I1121 19:14:58.352476 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-hmzkz" event={"ID":"fa17db1f-33dc-4f0e-a191-7a01d67c575d","Type":"ContainerStarted","Data":"29583e7246c6913c9e7cabeac044c1eff19c3411c142bddf5d9c95efcb2ec6b3"} Nov 21 19:14:58 crc kubenswrapper[4701]: I1121 19:14:58.352930 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-hmzkz" Nov 21 19:14:58 crc kubenswrapper[4701]: I1121 19:14:58.352942 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-hmzkz" event={"ID":"fa17db1f-33dc-4f0e-a191-7a01d67c575d","Type":"ContainerStarted","Data":"1bed0840a129de2ec95fdfda782520ee3483881b3aa4b048b75ab7af420b150e"} Nov 21 19:14:58 crc kubenswrapper[4701]: I1121 
19:14:58.374990 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-hmzkz" podStartSLOduration=3.374962891 podStartE2EDuration="3.374962891s" podCreationTimestamp="2025-11-21 19:14:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:14:58.370282814 +0000 UTC m=+789.155422841" watchObservedRunningTime="2025-11-21 19:14:58.374962891 +0000 UTC m=+789.160102918" Nov 21 19:15:00 crc kubenswrapper[4701]: I1121 19:15:00.133043 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395875-xjspq"] Nov 21 19:15:00 crc kubenswrapper[4701]: I1121 19:15:00.134430 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395875-xjspq" Nov 21 19:15:00 crc kubenswrapper[4701]: I1121 19:15:00.137867 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 21 19:15:00 crc kubenswrapper[4701]: I1121 19:15:00.138049 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 21 19:15:00 crc kubenswrapper[4701]: I1121 19:15:00.143368 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395875-xjspq"] Nov 21 19:15:00 crc kubenswrapper[4701]: I1121 19:15:00.178497 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/49b5f1c9-9af4-4f31-bc1b-187255d3c54d-config-volume\") pod \"collect-profiles-29395875-xjspq\" (UID: \"49b5f1c9-9af4-4f31-bc1b-187255d3c54d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395875-xjspq" Nov 21 19:15:00 crc kubenswrapper[4701]: I1121 19:15:00.178864 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/49b5f1c9-9af4-4f31-bc1b-187255d3c54d-secret-volume\") pod \"collect-profiles-29395875-xjspq\" (UID: \"49b5f1c9-9af4-4f31-bc1b-187255d3c54d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395875-xjspq" Nov 21 19:15:00 crc kubenswrapper[4701]: I1121 19:15:00.179458 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b5trn\" (UniqueName: \"kubernetes.io/projected/49b5f1c9-9af4-4f31-bc1b-187255d3c54d-kube-api-access-b5trn\") pod \"collect-profiles-29395875-xjspq\" (UID: \"49b5f1c9-9af4-4f31-bc1b-187255d3c54d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395875-xjspq" Nov 21 19:15:00 crc kubenswrapper[4701]: I1121 19:15:00.286904 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/49b5f1c9-9af4-4f31-bc1b-187255d3c54d-config-volume\") pod \"collect-profiles-29395875-xjspq\" (UID: \"49b5f1c9-9af4-4f31-bc1b-187255d3c54d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395875-xjspq" Nov 21 19:15:00 crc kubenswrapper[4701]: I1121 19:15:00.287012 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/49b5f1c9-9af4-4f31-bc1b-187255d3c54d-secret-volume\") pod \"collect-profiles-29395875-xjspq\" 
(UID: \"49b5f1c9-9af4-4f31-bc1b-187255d3c54d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395875-xjspq" Nov 21 19:15:00 crc kubenswrapper[4701]: I1121 19:15:00.287119 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b5trn\" (UniqueName: \"kubernetes.io/projected/49b5f1c9-9af4-4f31-bc1b-187255d3c54d-kube-api-access-b5trn\") pod \"collect-profiles-29395875-xjspq\" (UID: \"49b5f1c9-9af4-4f31-bc1b-187255d3c54d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395875-xjspq" Nov 21 19:15:00 crc kubenswrapper[4701]: I1121 19:15:00.287905 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/49b5f1c9-9af4-4f31-bc1b-187255d3c54d-config-volume\") pod \"collect-profiles-29395875-xjspq\" (UID: \"49b5f1c9-9af4-4f31-bc1b-187255d3c54d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395875-xjspq" Nov 21 19:15:00 crc kubenswrapper[4701]: I1121 19:15:00.303281 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/49b5f1c9-9af4-4f31-bc1b-187255d3c54d-secret-volume\") pod \"collect-profiles-29395875-xjspq\" (UID: \"49b5f1c9-9af4-4f31-bc1b-187255d3c54d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395875-xjspq" Nov 21 19:15:00 crc kubenswrapper[4701]: I1121 19:15:00.311321 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b5trn\" (UniqueName: \"kubernetes.io/projected/49b5f1c9-9af4-4f31-bc1b-187255d3c54d-kube-api-access-b5trn\") pod \"collect-profiles-29395875-xjspq\" (UID: \"49b5f1c9-9af4-4f31-bc1b-187255d3c54d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395875-xjspq" Nov 21 19:15:00 crc kubenswrapper[4701]: I1121 19:15:00.467332 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395875-xjspq" Nov 21 19:15:00 crc kubenswrapper[4701]: I1121 19:15:00.919688 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395875-xjspq"] Nov 21 19:15:01 crc kubenswrapper[4701]: I1121 19:15:01.390541 4701 generic.go:334] "Generic (PLEG): container finished" podID="49b5f1c9-9af4-4f31-bc1b-187255d3c54d" containerID="b35425b01358249b044adc8ab2eda4643c4fc378e5abf6dd76d0a26346c8d878" exitCode=0 Nov 21 19:15:01 crc kubenswrapper[4701]: I1121 19:15:01.390614 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395875-xjspq" event={"ID":"49b5f1c9-9af4-4f31-bc1b-187255d3c54d","Type":"ContainerDied","Data":"b35425b01358249b044adc8ab2eda4643c4fc378e5abf6dd76d0a26346c8d878"} Nov 21 19:15:01 crc kubenswrapper[4701]: I1121 19:15:01.390646 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395875-xjspq" event={"ID":"49b5f1c9-9af4-4f31-bc1b-187255d3c54d","Type":"ContainerStarted","Data":"1d780341af1d3b3ade3881d4154ed519b2b596d63d7d98eb10c313696ab9da1b"} Nov 21 19:15:04 crc kubenswrapper[4701]: I1121 19:15:04.433031 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395875-xjspq" Nov 21 19:15:04 crc kubenswrapper[4701]: I1121 19:15:04.435746 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395875-xjspq" event={"ID":"49b5f1c9-9af4-4f31-bc1b-187255d3c54d","Type":"ContainerDied","Data":"1d780341af1d3b3ade3881d4154ed519b2b596d63d7d98eb10c313696ab9da1b"} Nov 21 19:15:04 crc kubenswrapper[4701]: I1121 19:15:04.435792 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1d780341af1d3b3ade3881d4154ed519b2b596d63d7d98eb10c313696ab9da1b" Nov 21 19:15:04 crc kubenswrapper[4701]: I1121 19:15:04.577549 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b5trn\" (UniqueName: \"kubernetes.io/projected/49b5f1c9-9af4-4f31-bc1b-187255d3c54d-kube-api-access-b5trn\") pod \"49b5f1c9-9af4-4f31-bc1b-187255d3c54d\" (UID: \"49b5f1c9-9af4-4f31-bc1b-187255d3c54d\") " Nov 21 19:15:04 crc kubenswrapper[4701]: I1121 19:15:04.577655 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/49b5f1c9-9af4-4f31-bc1b-187255d3c54d-secret-volume\") pod \"49b5f1c9-9af4-4f31-bc1b-187255d3c54d\" (UID: \"49b5f1c9-9af4-4f31-bc1b-187255d3c54d\") " Nov 21 19:15:04 crc kubenswrapper[4701]: I1121 19:15:04.577733 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/49b5f1c9-9af4-4f31-bc1b-187255d3c54d-config-volume\") pod \"49b5f1c9-9af4-4f31-bc1b-187255d3c54d\" (UID: \"49b5f1c9-9af4-4f31-bc1b-187255d3c54d\") " Nov 21 19:15:04 crc kubenswrapper[4701]: I1121 19:15:04.579285 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49b5f1c9-9af4-4f31-bc1b-187255d3c54d-config-volume" (OuterVolumeSpecName: "config-volume") pod "49b5f1c9-9af4-4f31-bc1b-187255d3c54d" (UID: "49b5f1c9-9af4-4f31-bc1b-187255d3c54d"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:15:04 crc kubenswrapper[4701]: I1121 19:15:04.586081 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49b5f1c9-9af4-4f31-bc1b-187255d3c54d-kube-api-access-b5trn" (OuterVolumeSpecName: "kube-api-access-b5trn") pod "49b5f1c9-9af4-4f31-bc1b-187255d3c54d" (UID: "49b5f1c9-9af4-4f31-bc1b-187255d3c54d"). InnerVolumeSpecName "kube-api-access-b5trn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:15:04 crc kubenswrapper[4701]: I1121 19:15:04.586684 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49b5f1c9-9af4-4f31-bc1b-187255d3c54d-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "49b5f1c9-9af4-4f31-bc1b-187255d3c54d" (UID: "49b5f1c9-9af4-4f31-bc1b-187255d3c54d"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:15:04 crc kubenswrapper[4701]: I1121 19:15:04.678955 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b5trn\" (UniqueName: \"kubernetes.io/projected/49b5f1c9-9af4-4f31-bc1b-187255d3c54d-kube-api-access-b5trn\") on node \"crc\" DevicePath \"\"" Nov 21 19:15:04 crc kubenswrapper[4701]: I1121 19:15:04.678993 4701 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/49b5f1c9-9af4-4f31-bc1b-187255d3c54d-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 21 19:15:04 crc kubenswrapper[4701]: I1121 19:15:04.679007 4701 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/49b5f1c9-9af4-4f31-bc1b-187255d3c54d-config-volume\") on node \"crc\" DevicePath \"\"" Nov 21 19:15:05 crc kubenswrapper[4701]: I1121 19:15:05.447711 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-4zbkd" event={"ID":"dc0b171f-3d9f-41b9-914b-ca723de8416f","Type":"ContainerStarted","Data":"af6e1c169c239c4a48e1f0374d55c109af3105812d12afd794d90b3d2cc01ec2"} Nov 21 19:15:05 crc kubenswrapper[4701]: I1121 19:15:05.447892 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-6998585d5-4zbkd" Nov 21 19:15:05 crc kubenswrapper[4701]: I1121 19:15:05.451389 4701 generic.go:334] "Generic (PLEG): container finished" podID="5a5f9d84-176f-4592-b663-03adffd0073f" containerID="0c9fce9cb6f4650c4ffed8f513ebe4c9f31a4fa3930876bb649a22ab9a7565e5" exitCode=0 Nov 21 19:15:05 crc kubenswrapper[4701]: I1121 19:15:05.451445 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9ssmb" event={"ID":"5a5f9d84-176f-4592-b663-03adffd0073f","Type":"ContainerDied","Data":"0c9fce9cb6f4650c4ffed8f513ebe4c9f31a4fa3930876bb649a22ab9a7565e5"} Nov 21 19:15:05 crc kubenswrapper[4701]: I1121 19:15:05.451516 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395875-xjspq" Nov 21 19:15:05 crc kubenswrapper[4701]: I1121 19:15:05.493743 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-6998585d5-4zbkd" podStartSLOduration=1.8403557780000002 podStartE2EDuration="10.493714953s" podCreationTimestamp="2025-11-21 19:14:55 +0000 UTC" firstStartedPulling="2025-11-21 19:14:55.824074201 +0000 UTC m=+786.609214228" lastFinishedPulling="2025-11-21 19:15:04.477433366 +0000 UTC m=+795.262573403" observedRunningTime="2025-11-21 19:15:05.487791003 +0000 UTC m=+796.272931100" watchObservedRunningTime="2025-11-21 19:15:05.493714953 +0000 UTC m=+796.278855010" Nov 21 19:15:06 crc kubenswrapper[4701]: I1121 19:15:06.470985 4701 generic.go:334] "Generic (PLEG): container finished" podID="5a5f9d84-176f-4592-b663-03adffd0073f" containerID="eee239204ce5047a182e81312e3143dab026a65b738d153318ee550b458db672" exitCode=0 Nov 21 19:15:06 crc kubenswrapper[4701]: I1121 19:15:06.473572 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9ssmb" event={"ID":"5a5f9d84-176f-4592-b663-03adffd0073f","Type":"ContainerDied","Data":"eee239204ce5047a182e81312e3143dab026a65b738d153318ee550b458db672"} Nov 21 19:15:07 crc kubenswrapper[4701]: I1121 19:15:07.500152 4701 generic.go:334] "Generic (PLEG): container finished" podID="5a5f9d84-176f-4592-b663-03adffd0073f" containerID="eb9e1970f1cf4118e1f6f7928c016ed2622bd317ec24d95bd656177fb808896a" exitCode=0 Nov 21 19:15:07 crc kubenswrapper[4701]: I1121 19:15:07.500254 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9ssmb" event={"ID":"5a5f9d84-176f-4592-b663-03adffd0073f","Type":"ContainerDied","Data":"eb9e1970f1cf4118e1f6f7928c016ed2622bd317ec24d95bd656177fb808896a"} Nov 21 19:15:08 crc kubenswrapper[4701]: I1121 19:15:08.518255 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9ssmb" event={"ID":"5a5f9d84-176f-4592-b663-03adffd0073f","Type":"ContainerStarted","Data":"0954f5ee4440f92160d237852a1707a5b8db8923b6f945e17a95dd7ccc907b87"} Nov 21 19:15:08 crc kubenswrapper[4701]: I1121 19:15:08.519000 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9ssmb" event={"ID":"5a5f9d84-176f-4592-b663-03adffd0073f","Type":"ContainerStarted","Data":"86da44ed5ac939e21c3b5aa5488e0bea6a011555020f393579c2793122d1e403"} Nov 21 19:15:08 crc kubenswrapper[4701]: I1121 19:15:08.519031 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9ssmb" event={"ID":"5a5f9d84-176f-4592-b663-03adffd0073f","Type":"ContainerStarted","Data":"ad26b8791f82ddd1357ee2512e8106984ac487acb233fccdcafe4b60e394004c"} Nov 21 19:15:09 crc kubenswrapper[4701]: I1121 19:15:09.538571 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9ssmb" event={"ID":"5a5f9d84-176f-4592-b663-03adffd0073f","Type":"ContainerStarted","Data":"661e8348b70a837c706fe89f2879afa941109bef9030ef1afb1c994abe792d8d"} Nov 21 19:15:09 crc kubenswrapper[4701]: I1121 19:15:09.538719 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9ssmb" event={"ID":"5a5f9d84-176f-4592-b663-03adffd0073f","Type":"ContainerStarted","Data":"25faf2640abc6062a6ad0fb735b4c7e45ea63fba0eaee27ab612f7f55ffabf9d"} Nov 21 19:15:10 crc kubenswrapper[4701]: I1121 19:15:10.549949 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9ssmb" 
event={"ID":"5a5f9d84-176f-4592-b663-03adffd0073f","Type":"ContainerStarted","Data":"4aa2771f8025808e89de9bffb5b4422bf9f33db7ed63511f753ae17596290405"} Nov 21 19:15:10 crc kubenswrapper[4701]: I1121 19:15:10.550168 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-9ssmb" Nov 21 19:15:10 crc kubenswrapper[4701]: I1121 19:15:10.587649 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-9ssmb" podStartSLOduration=6.674809244 podStartE2EDuration="15.587632079s" podCreationTimestamp="2025-11-21 19:14:55 +0000 UTC" firstStartedPulling="2025-11-21 19:14:55.583134123 +0000 UTC m=+786.368274150" lastFinishedPulling="2025-11-21 19:15:04.495956948 +0000 UTC m=+795.281096985" observedRunningTime="2025-11-21 19:15:10.582216314 +0000 UTC m=+801.367356341" watchObservedRunningTime="2025-11-21 19:15:10.587632079 +0000 UTC m=+801.372772106" Nov 21 19:15:15 crc kubenswrapper[4701]: I1121 19:15:15.388399 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-9ssmb" Nov 21 19:15:15 crc kubenswrapper[4701]: I1121 19:15:15.403736 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-6998585d5-4zbkd" Nov 21 19:15:15 crc kubenswrapper[4701]: I1121 19:15:15.466521 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-9ssmb" Nov 21 19:15:15 crc kubenswrapper[4701]: I1121 19:15:15.512020 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6c7b4b5f48-htbbv" Nov 21 19:15:16 crc kubenswrapper[4701]: I1121 19:15:16.985396 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-hmzkz" Nov 21 19:15:20 crc kubenswrapper[4701]: I1121 19:15:20.569622 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-c9k6l"] Nov 21 19:15:20 crc kubenswrapper[4701]: E1121 19:15:20.570151 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49b5f1c9-9af4-4f31-bc1b-187255d3c54d" containerName="collect-profiles" Nov 21 19:15:20 crc kubenswrapper[4701]: I1121 19:15:20.570178 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="49b5f1c9-9af4-4f31-bc1b-187255d3c54d" containerName="collect-profiles" Nov 21 19:15:20 crc kubenswrapper[4701]: I1121 19:15:20.570436 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="49b5f1c9-9af4-4f31-bc1b-187255d3c54d" containerName="collect-profiles" Nov 21 19:15:20 crc kubenswrapper[4701]: I1121 19:15:20.579450 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-c9k6l" Nov 21 19:15:20 crc kubenswrapper[4701]: I1121 19:15:20.582166 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Nov 21 19:15:20 crc kubenswrapper[4701]: I1121 19:15:20.583053 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-gns96" Nov 21 19:15:20 crc kubenswrapper[4701]: I1121 19:15:20.584279 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Nov 21 19:15:20 crc kubenswrapper[4701]: I1121 19:15:20.591676 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-c9k6l"] Nov 21 19:15:20 crc kubenswrapper[4701]: I1121 19:15:20.673635 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9xh92\" (UniqueName: \"kubernetes.io/projected/b505e480-5a66-4c2f-92a5-dc67d964e6b5-kube-api-access-9xh92\") pod \"openstack-operator-index-c9k6l\" (UID: \"b505e480-5a66-4c2f-92a5-dc67d964e6b5\") " pod="openstack-operators/openstack-operator-index-c9k6l" Nov 21 19:15:20 crc kubenswrapper[4701]: I1121 19:15:20.775109 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9xh92\" (UniqueName: \"kubernetes.io/projected/b505e480-5a66-4c2f-92a5-dc67d964e6b5-kube-api-access-9xh92\") pod \"openstack-operator-index-c9k6l\" (UID: \"b505e480-5a66-4c2f-92a5-dc67d964e6b5\") " pod="openstack-operators/openstack-operator-index-c9k6l" Nov 21 19:15:20 crc kubenswrapper[4701]: I1121 19:15:20.809573 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9xh92\" (UniqueName: \"kubernetes.io/projected/b505e480-5a66-4c2f-92a5-dc67d964e6b5-kube-api-access-9xh92\") pod \"openstack-operator-index-c9k6l\" (UID: \"b505e480-5a66-4c2f-92a5-dc67d964e6b5\") " pod="openstack-operators/openstack-operator-index-c9k6l" Nov 21 19:15:20 crc kubenswrapper[4701]: I1121 19:15:20.902303 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-c9k6l" Nov 21 19:15:21 crc kubenswrapper[4701]: I1121 19:15:21.451587 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-c9k6l"] Nov 21 19:15:21 crc kubenswrapper[4701]: W1121 19:15:21.453351 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb505e480_5a66_4c2f_92a5_dc67d964e6b5.slice/crio-645e670c705e74a56c3c0e5c125843b4aa8bd0b39c6634c22a45521f24be9d68 WatchSource:0}: Error finding container 645e670c705e74a56c3c0e5c125843b4aa8bd0b39c6634c22a45521f24be9d68: Status 404 returned error can't find the container with id 645e670c705e74a56c3c0e5c125843b4aa8bd0b39c6634c22a45521f24be9d68 Nov 21 19:15:21 crc kubenswrapper[4701]: I1121 19:15:21.694248 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-c9k6l" event={"ID":"b505e480-5a66-4c2f-92a5-dc67d964e6b5","Type":"ContainerStarted","Data":"645e670c705e74a56c3c0e5c125843b4aa8bd0b39c6634c22a45521f24be9d68"} Nov 21 19:15:23 crc kubenswrapper[4701]: I1121 19:15:23.737189 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-c9k6l"] Nov 21 19:15:24 crc kubenswrapper[4701]: I1121 19:15:24.348510 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-hm4sz"] Nov 21 19:15:24 crc kubenswrapper[4701]: I1121 19:15:24.351709 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-hm4sz" Nov 21 19:15:24 crc kubenswrapper[4701]: I1121 19:15:24.358229 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-hm4sz"] Nov 21 19:15:24 crc kubenswrapper[4701]: I1121 19:15:24.441682 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fjw9m\" (UniqueName: \"kubernetes.io/projected/42334fc1-ad97-4595-bb9b-4c7f736391e4-kube-api-access-fjw9m\") pod \"openstack-operator-index-hm4sz\" (UID: \"42334fc1-ad97-4595-bb9b-4c7f736391e4\") " pod="openstack-operators/openstack-operator-index-hm4sz" Nov 21 19:15:24 crc kubenswrapper[4701]: I1121 19:15:24.545320 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fjw9m\" (UniqueName: \"kubernetes.io/projected/42334fc1-ad97-4595-bb9b-4c7f736391e4-kube-api-access-fjw9m\") pod \"openstack-operator-index-hm4sz\" (UID: \"42334fc1-ad97-4595-bb9b-4c7f736391e4\") " pod="openstack-operators/openstack-operator-index-hm4sz" Nov 21 19:15:24 crc kubenswrapper[4701]: I1121 19:15:24.584424 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fjw9m\" (UniqueName: \"kubernetes.io/projected/42334fc1-ad97-4595-bb9b-4c7f736391e4-kube-api-access-fjw9m\") pod \"openstack-operator-index-hm4sz\" (UID: \"42334fc1-ad97-4595-bb9b-4c7f736391e4\") " pod="openstack-operators/openstack-operator-index-hm4sz" Nov 21 19:15:24 crc kubenswrapper[4701]: I1121 19:15:24.688750 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-hm4sz" Nov 21 19:15:24 crc kubenswrapper[4701]: I1121 19:15:24.723713 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-c9k6l" event={"ID":"b505e480-5a66-4c2f-92a5-dc67d964e6b5","Type":"ContainerStarted","Data":"3e81580cb8e5ab9b3ff1cff4241c34226860d82d2b0bb15f5acd1aba07386dfd"} Nov 21 19:15:24 crc kubenswrapper[4701]: I1121 19:15:24.724058 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-c9k6l" podUID="b505e480-5a66-4c2f-92a5-dc67d964e6b5" containerName="registry-server" containerID="cri-o://3e81580cb8e5ab9b3ff1cff4241c34226860d82d2b0bb15f5acd1aba07386dfd" gracePeriod=2 Nov 21 19:15:24 crc kubenswrapper[4701]: I1121 19:15:24.754508 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-c9k6l" podStartSLOduration=1.8553611540000001 podStartE2EDuration="4.754481679s" podCreationTimestamp="2025-11-21 19:15:20 +0000 UTC" firstStartedPulling="2025-11-21 19:15:21.459192399 +0000 UTC m=+812.244332426" lastFinishedPulling="2025-11-21 19:15:24.358312924 +0000 UTC m=+815.143452951" observedRunningTime="2025-11-21 19:15:24.753607805 +0000 UTC m=+815.538747872" watchObservedRunningTime="2025-11-21 19:15:24.754481679 +0000 UTC m=+815.539621736" Nov 21 19:15:25 crc kubenswrapper[4701]: I1121 19:15:25.161354 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-c9k6l" Nov 21 19:15:25 crc kubenswrapper[4701]: I1121 19:15:25.202435 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-hm4sz"] Nov 21 19:15:25 crc kubenswrapper[4701]: I1121 19:15:25.260302 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xh92\" (UniqueName: \"kubernetes.io/projected/b505e480-5a66-4c2f-92a5-dc67d964e6b5-kube-api-access-9xh92\") pod \"b505e480-5a66-4c2f-92a5-dc67d964e6b5\" (UID: \"b505e480-5a66-4c2f-92a5-dc67d964e6b5\") " Nov 21 19:15:25 crc kubenswrapper[4701]: I1121 19:15:25.266945 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b505e480-5a66-4c2f-92a5-dc67d964e6b5-kube-api-access-9xh92" (OuterVolumeSpecName: "kube-api-access-9xh92") pod "b505e480-5a66-4c2f-92a5-dc67d964e6b5" (UID: "b505e480-5a66-4c2f-92a5-dc67d964e6b5"). InnerVolumeSpecName "kube-api-access-9xh92". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:15:25 crc kubenswrapper[4701]: I1121 19:15:25.362771 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xh92\" (UniqueName: \"kubernetes.io/projected/b505e480-5a66-4c2f-92a5-dc67d964e6b5-kube-api-access-9xh92\") on node \"crc\" DevicePath \"\"" Nov 21 19:15:25 crc kubenswrapper[4701]: I1121 19:15:25.395864 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-9ssmb" Nov 21 19:15:25 crc kubenswrapper[4701]: I1121 19:15:25.735327 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-hm4sz" event={"ID":"42334fc1-ad97-4595-bb9b-4c7f736391e4","Type":"ContainerStarted","Data":"e1e81beb7936abe59a99d5d9823eb0e0d27cf1c96aa912fdfdc6fe5c2068d16a"} Nov 21 19:15:25 crc kubenswrapper[4701]: I1121 19:15:25.735402 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-hm4sz" event={"ID":"42334fc1-ad97-4595-bb9b-4c7f736391e4","Type":"ContainerStarted","Data":"632eead2408cf62344b1d4043724115ecae9a873adb3da670f11b46f51c017cf"} Nov 21 19:15:25 crc kubenswrapper[4701]: I1121 19:15:25.737948 4701 generic.go:334] "Generic (PLEG): container finished" podID="b505e480-5a66-4c2f-92a5-dc67d964e6b5" containerID="3e81580cb8e5ab9b3ff1cff4241c34226860d82d2b0bb15f5acd1aba07386dfd" exitCode=0 Nov 21 19:15:25 crc kubenswrapper[4701]: I1121 19:15:25.737993 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-c9k6l" event={"ID":"b505e480-5a66-4c2f-92a5-dc67d964e6b5","Type":"ContainerDied","Data":"3e81580cb8e5ab9b3ff1cff4241c34226860d82d2b0bb15f5acd1aba07386dfd"} Nov 21 19:15:25 crc kubenswrapper[4701]: I1121 19:15:25.738010 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-c9k6l" event={"ID":"b505e480-5a66-4c2f-92a5-dc67d964e6b5","Type":"ContainerDied","Data":"645e670c705e74a56c3c0e5c125843b4aa8bd0b39c6634c22a45521f24be9d68"} Nov 21 19:15:25 crc kubenswrapper[4701]: I1121 19:15:25.738028 4701 scope.go:117] "RemoveContainer" containerID="3e81580cb8e5ab9b3ff1cff4241c34226860d82d2b0bb15f5acd1aba07386dfd" Nov 21 19:15:25 crc kubenswrapper[4701]: I1121 19:15:25.738158 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-c9k6l" Nov 21 19:15:25 crc kubenswrapper[4701]: I1121 19:15:25.762284 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-hm4sz" podStartSLOduration=1.714948514 podStartE2EDuration="1.762268337s" podCreationTimestamp="2025-11-21 19:15:24 +0000 UTC" firstStartedPulling="2025-11-21 19:15:25.223267851 +0000 UTC m=+816.008407878" lastFinishedPulling="2025-11-21 19:15:25.270587674 +0000 UTC m=+816.055727701" observedRunningTime="2025-11-21 19:15:25.759235355 +0000 UTC m=+816.544375392" watchObservedRunningTime="2025-11-21 19:15:25.762268337 +0000 UTC m=+816.547408364" Nov 21 19:15:25 crc kubenswrapper[4701]: I1121 19:15:25.783364 4701 scope.go:117] "RemoveContainer" containerID="3e81580cb8e5ab9b3ff1cff4241c34226860d82d2b0bb15f5acd1aba07386dfd" Nov 21 19:15:25 crc kubenswrapper[4701]: E1121 19:15:25.784095 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3e81580cb8e5ab9b3ff1cff4241c34226860d82d2b0bb15f5acd1aba07386dfd\": container with ID starting with 3e81580cb8e5ab9b3ff1cff4241c34226860d82d2b0bb15f5acd1aba07386dfd not found: ID does not exist" containerID="3e81580cb8e5ab9b3ff1cff4241c34226860d82d2b0bb15f5acd1aba07386dfd" Nov 21 19:15:25 crc kubenswrapper[4701]: I1121 19:15:25.784181 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3e81580cb8e5ab9b3ff1cff4241c34226860d82d2b0bb15f5acd1aba07386dfd"} err="failed to get container status \"3e81580cb8e5ab9b3ff1cff4241c34226860d82d2b0bb15f5acd1aba07386dfd\": rpc error: code = NotFound desc = could not find container \"3e81580cb8e5ab9b3ff1cff4241c34226860d82d2b0bb15f5acd1aba07386dfd\": container with ID starting with 3e81580cb8e5ab9b3ff1cff4241c34226860d82d2b0bb15f5acd1aba07386dfd not found: ID does not exist" Nov 21 19:15:25 crc kubenswrapper[4701]: I1121 19:15:25.789622 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-c9k6l"] Nov 21 19:15:25 crc kubenswrapper[4701]: I1121 19:15:25.793521 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-c9k6l"] Nov 21 19:15:25 crc kubenswrapper[4701]: I1121 19:15:25.962262 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b505e480-5a66-4c2f-92a5-dc67d964e6b5" path="/var/lib/kubelet/pods/b505e480-5a66-4c2f-92a5-dc67d964e6b5/volumes" Nov 21 19:15:31 crc kubenswrapper[4701]: I1121 19:15:31.344558 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-nv4c8"] Nov 21 19:15:31 crc kubenswrapper[4701]: E1121 19:15:31.345292 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b505e480-5a66-4c2f-92a5-dc67d964e6b5" containerName="registry-server" Nov 21 19:15:31 crc kubenswrapper[4701]: I1121 19:15:31.345310 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="b505e480-5a66-4c2f-92a5-dc67d964e6b5" containerName="registry-server" Nov 21 19:15:31 crc kubenswrapper[4701]: I1121 19:15:31.345461 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="b505e480-5a66-4c2f-92a5-dc67d964e6b5" containerName="registry-server" Nov 21 19:15:31 crc kubenswrapper[4701]: I1121 19:15:31.346680 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-nv4c8" Nov 21 19:15:31 crc kubenswrapper[4701]: I1121 19:15:31.362946 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r59dd\" (UniqueName: \"kubernetes.io/projected/c744748b-c844-40e1-a1a8-7135a0f2c90d-kube-api-access-r59dd\") pod \"community-operators-nv4c8\" (UID: \"c744748b-c844-40e1-a1a8-7135a0f2c90d\") " pod="openshift-marketplace/community-operators-nv4c8" Nov 21 19:15:31 crc kubenswrapper[4701]: I1121 19:15:31.363014 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c744748b-c844-40e1-a1a8-7135a0f2c90d-catalog-content\") pod \"community-operators-nv4c8\" (UID: \"c744748b-c844-40e1-a1a8-7135a0f2c90d\") " pod="openshift-marketplace/community-operators-nv4c8" Nov 21 19:15:31 crc kubenswrapper[4701]: I1121 19:15:31.363103 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c744748b-c844-40e1-a1a8-7135a0f2c90d-utilities\") pod \"community-operators-nv4c8\" (UID: \"c744748b-c844-40e1-a1a8-7135a0f2c90d\") " pod="openshift-marketplace/community-operators-nv4c8" Nov 21 19:15:31 crc kubenswrapper[4701]: I1121 19:15:31.369589 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-nv4c8"] Nov 21 19:15:31 crc kubenswrapper[4701]: I1121 19:15:31.464459 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r59dd\" (UniqueName: \"kubernetes.io/projected/c744748b-c844-40e1-a1a8-7135a0f2c90d-kube-api-access-r59dd\") pod \"community-operators-nv4c8\" (UID: \"c744748b-c844-40e1-a1a8-7135a0f2c90d\") " pod="openshift-marketplace/community-operators-nv4c8" Nov 21 19:15:31 crc kubenswrapper[4701]: I1121 19:15:31.464537 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c744748b-c844-40e1-a1a8-7135a0f2c90d-catalog-content\") pod \"community-operators-nv4c8\" (UID: \"c744748b-c844-40e1-a1a8-7135a0f2c90d\") " pod="openshift-marketplace/community-operators-nv4c8" Nov 21 19:15:31 crc kubenswrapper[4701]: I1121 19:15:31.464579 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c744748b-c844-40e1-a1a8-7135a0f2c90d-utilities\") pod \"community-operators-nv4c8\" (UID: \"c744748b-c844-40e1-a1a8-7135a0f2c90d\") " pod="openshift-marketplace/community-operators-nv4c8" Nov 21 19:15:31 crc kubenswrapper[4701]: I1121 19:15:31.465234 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c744748b-c844-40e1-a1a8-7135a0f2c90d-catalog-content\") pod \"community-operators-nv4c8\" (UID: \"c744748b-c844-40e1-a1a8-7135a0f2c90d\") " pod="openshift-marketplace/community-operators-nv4c8" Nov 21 19:15:31 crc kubenswrapper[4701]: I1121 19:15:31.465296 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c744748b-c844-40e1-a1a8-7135a0f2c90d-utilities\") pod \"community-operators-nv4c8\" (UID: \"c744748b-c844-40e1-a1a8-7135a0f2c90d\") " pod="openshift-marketplace/community-operators-nv4c8" Nov 21 19:15:31 crc kubenswrapper[4701]: I1121 19:15:31.489443 4701 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-r59dd\" (UniqueName: \"kubernetes.io/projected/c744748b-c844-40e1-a1a8-7135a0f2c90d-kube-api-access-r59dd\") pod \"community-operators-nv4c8\" (UID: \"c744748b-c844-40e1-a1a8-7135a0f2c90d\") " pod="openshift-marketplace/community-operators-nv4c8" Nov 21 19:15:31 crc kubenswrapper[4701]: I1121 19:15:31.670095 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-nv4c8" Nov 21 19:15:32 crc kubenswrapper[4701]: I1121 19:15:32.232371 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-nv4c8"] Nov 21 19:15:32 crc kubenswrapper[4701]: I1121 19:15:32.823259 4701 generic.go:334] "Generic (PLEG): container finished" podID="c744748b-c844-40e1-a1a8-7135a0f2c90d" containerID="5b4097b4ca0b6a63ae50dc34dcc1f904635b5447861622519d1f8fb4c673d3f1" exitCode=0 Nov 21 19:15:32 crc kubenswrapper[4701]: I1121 19:15:32.823465 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nv4c8" event={"ID":"c744748b-c844-40e1-a1a8-7135a0f2c90d","Type":"ContainerDied","Data":"5b4097b4ca0b6a63ae50dc34dcc1f904635b5447861622519d1f8fb4c673d3f1"} Nov 21 19:15:32 crc kubenswrapper[4701]: I1121 19:15:32.826548 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nv4c8" event={"ID":"c744748b-c844-40e1-a1a8-7135a0f2c90d","Type":"ContainerStarted","Data":"9020224505d2d0f4833b1c9539a80d147f40224d1ae85bfd78b373851a72c9cf"} Nov 21 19:15:34 crc kubenswrapper[4701]: I1121 19:15:34.690184 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-hm4sz" Nov 21 19:15:34 crc kubenswrapper[4701]: I1121 19:15:34.690726 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-hm4sz" Nov 21 19:15:34 crc kubenswrapper[4701]: I1121 19:15:34.736998 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-hm4sz" Nov 21 19:15:34 crc kubenswrapper[4701]: I1121 19:15:34.847739 4701 generic.go:334] "Generic (PLEG): container finished" podID="c744748b-c844-40e1-a1a8-7135a0f2c90d" containerID="7798a47e4552ab68d748f3f6f4ef42b793fb105b16fcb6a828db8fc5c73d7eaf" exitCode=0 Nov 21 19:15:34 crc kubenswrapper[4701]: I1121 19:15:34.847827 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nv4c8" event={"ID":"c744748b-c844-40e1-a1a8-7135a0f2c90d","Type":"ContainerDied","Data":"7798a47e4552ab68d748f3f6f4ef42b793fb105b16fcb6a828db8fc5c73d7eaf"} Nov 21 19:15:34 crc kubenswrapper[4701]: I1121 19:15:34.890547 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-hm4sz" Nov 21 19:15:35 crc kubenswrapper[4701]: I1121 19:15:35.861372 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nv4c8" event={"ID":"c744748b-c844-40e1-a1a8-7135a0f2c90d","Type":"ContainerStarted","Data":"2f59e813d721ca0295aa49baa16f4df8025bfedb709b40a5595534dbd314f8c8"} Nov 21 19:15:35 crc kubenswrapper[4701]: I1121 19:15:35.894855 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-nv4c8" podStartSLOduration=2.492337844 podStartE2EDuration="4.894824692s" podCreationTimestamp="2025-11-21 19:15:31 +0000 
UTC" firstStartedPulling="2025-11-21 19:15:32.82531643 +0000 UTC m=+823.610456457" lastFinishedPulling="2025-11-21 19:15:35.227803288 +0000 UTC m=+826.012943305" observedRunningTime="2025-11-21 19:15:35.885688685 +0000 UTC m=+826.670828752" watchObservedRunningTime="2025-11-21 19:15:35.894824692 +0000 UTC m=+826.679964759" Nov 21 19:15:36 crc kubenswrapper[4701]: I1121 19:15:36.840624 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98"] Nov 21 19:15:36 crc kubenswrapper[4701]: I1121 19:15:36.842994 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98" Nov 21 19:15:36 crc kubenswrapper[4701]: I1121 19:15:36.846807 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-9plwg" Nov 21 19:15:36 crc kubenswrapper[4701]: I1121 19:15:36.862552 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98"] Nov 21 19:15:36 crc kubenswrapper[4701]: I1121 19:15:36.863553 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c35200a2-6f14-4b98-b227-d93f103b9d76-util\") pod \"73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98\" (UID: \"c35200a2-6f14-4b98-b227-d93f103b9d76\") " pod="openstack-operators/73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98" Nov 21 19:15:36 crc kubenswrapper[4701]: I1121 19:15:36.863709 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c35200a2-6f14-4b98-b227-d93f103b9d76-bundle\") pod \"73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98\" (UID: \"c35200a2-6f14-4b98-b227-d93f103b9d76\") " pod="openstack-operators/73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98" Nov 21 19:15:36 crc kubenswrapper[4701]: I1121 19:15:36.863934 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xhclr\" (UniqueName: \"kubernetes.io/projected/c35200a2-6f14-4b98-b227-d93f103b9d76-kube-api-access-xhclr\") pod \"73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98\" (UID: \"c35200a2-6f14-4b98-b227-d93f103b9d76\") " pod="openstack-operators/73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98" Nov 21 19:15:36 crc kubenswrapper[4701]: I1121 19:15:36.964939 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c35200a2-6f14-4b98-b227-d93f103b9d76-util\") pod \"73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98\" (UID: \"c35200a2-6f14-4b98-b227-d93f103b9d76\") " pod="openstack-operators/73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98" Nov 21 19:15:36 crc kubenswrapper[4701]: I1121 19:15:36.965016 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c35200a2-6f14-4b98-b227-d93f103b9d76-bundle\") pod \"73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98\" (UID: \"c35200a2-6f14-4b98-b227-d93f103b9d76\") " pod="openstack-operators/73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98" Nov 21 19:15:36 crc kubenswrapper[4701]: I1121 19:15:36.965099 
4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xhclr\" (UniqueName: \"kubernetes.io/projected/c35200a2-6f14-4b98-b227-d93f103b9d76-kube-api-access-xhclr\") pod \"73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98\" (UID: \"c35200a2-6f14-4b98-b227-d93f103b9d76\") " pod="openstack-operators/73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98" Nov 21 19:15:36 crc kubenswrapper[4701]: I1121 19:15:36.965645 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c35200a2-6f14-4b98-b227-d93f103b9d76-util\") pod \"73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98\" (UID: \"c35200a2-6f14-4b98-b227-d93f103b9d76\") " pod="openstack-operators/73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98" Nov 21 19:15:36 crc kubenswrapper[4701]: I1121 19:15:36.965760 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c35200a2-6f14-4b98-b227-d93f103b9d76-bundle\") pod \"73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98\" (UID: \"c35200a2-6f14-4b98-b227-d93f103b9d76\") " pod="openstack-operators/73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98" Nov 21 19:15:37 crc kubenswrapper[4701]: I1121 19:15:37.003133 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xhclr\" (UniqueName: \"kubernetes.io/projected/c35200a2-6f14-4b98-b227-d93f103b9d76-kube-api-access-xhclr\") pod \"73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98\" (UID: \"c35200a2-6f14-4b98-b227-d93f103b9d76\") " pod="openstack-operators/73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98" Nov 21 19:15:37 crc kubenswrapper[4701]: I1121 19:15:37.163547 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98" Nov 21 19:15:37 crc kubenswrapper[4701]: I1121 19:15:37.678076 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98"] Nov 21 19:15:37 crc kubenswrapper[4701]: I1121 19:15:37.876966 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98" event={"ID":"c35200a2-6f14-4b98-b227-d93f103b9d76","Type":"ContainerStarted","Data":"b2c1c3fbee1ee79b70920579bfc5a2029822cd86b4b5829b5bdfdffcd3434e51"} Nov 21 19:15:38 crc kubenswrapper[4701]: I1121 19:15:38.888692 4701 generic.go:334] "Generic (PLEG): container finished" podID="c35200a2-6f14-4b98-b227-d93f103b9d76" containerID="e8e2edf46d518600ff72f7b6eeec18d32e913763f030721efec95d22b9e3966d" exitCode=0 Nov 21 19:15:38 crc kubenswrapper[4701]: I1121 19:15:38.888783 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98" event={"ID":"c35200a2-6f14-4b98-b227-d93f103b9d76","Type":"ContainerDied","Data":"e8e2edf46d518600ff72f7b6eeec18d32e913763f030721efec95d22b9e3966d"} Nov 21 19:15:39 crc kubenswrapper[4701]: I1121 19:15:39.897265 4701 generic.go:334] "Generic (PLEG): container finished" podID="c35200a2-6f14-4b98-b227-d93f103b9d76" containerID="f72345ec52b73254684d6bee0e6aee5edf2347afe28b63e2e73a555efa3871e3" exitCode=0 Nov 21 19:15:39 crc kubenswrapper[4701]: I1121 19:15:39.897361 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98" event={"ID":"c35200a2-6f14-4b98-b227-d93f103b9d76","Type":"ContainerDied","Data":"f72345ec52b73254684d6bee0e6aee5edf2347afe28b63e2e73a555efa3871e3"} Nov 21 19:15:40 crc kubenswrapper[4701]: I1121 19:15:40.909147 4701 generic.go:334] "Generic (PLEG): container finished" podID="c35200a2-6f14-4b98-b227-d93f103b9d76" containerID="6e0a0618df875d7e803be6f8bee4ff0b75c383148d5eb45dc6c879be809cea68" exitCode=0 Nov 21 19:15:40 crc kubenswrapper[4701]: I1121 19:15:40.909260 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98" event={"ID":"c35200a2-6f14-4b98-b227-d93f103b9d76","Type":"ContainerDied","Data":"6e0a0618df875d7e803be6f8bee4ff0b75c383148d5eb45dc6c879be809cea68"} Nov 21 19:15:41 crc kubenswrapper[4701]: I1121 19:15:41.670501 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-nv4c8" Nov 21 19:15:41 crc kubenswrapper[4701]: I1121 19:15:41.670584 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-nv4c8" Nov 21 19:15:41 crc kubenswrapper[4701]: I1121 19:15:41.744374 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-nv4c8" Nov 21 19:15:42 crc kubenswrapper[4701]: I1121 19:15:42.001794 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-nv4c8" Nov 21 19:15:42 crc kubenswrapper[4701]: I1121 19:15:42.277319 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98" Nov 21 19:15:42 crc kubenswrapper[4701]: I1121 19:15:42.349658 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xhclr\" (UniqueName: \"kubernetes.io/projected/c35200a2-6f14-4b98-b227-d93f103b9d76-kube-api-access-xhclr\") pod \"c35200a2-6f14-4b98-b227-d93f103b9d76\" (UID: \"c35200a2-6f14-4b98-b227-d93f103b9d76\") " Nov 21 19:15:42 crc kubenswrapper[4701]: I1121 19:15:42.349852 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c35200a2-6f14-4b98-b227-d93f103b9d76-util\") pod \"c35200a2-6f14-4b98-b227-d93f103b9d76\" (UID: \"c35200a2-6f14-4b98-b227-d93f103b9d76\") " Nov 21 19:15:42 crc kubenswrapper[4701]: I1121 19:15:42.349893 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c35200a2-6f14-4b98-b227-d93f103b9d76-bundle\") pod \"c35200a2-6f14-4b98-b227-d93f103b9d76\" (UID: \"c35200a2-6f14-4b98-b227-d93f103b9d76\") " Nov 21 19:15:42 crc kubenswrapper[4701]: I1121 19:15:42.351152 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c35200a2-6f14-4b98-b227-d93f103b9d76-bundle" (OuterVolumeSpecName: "bundle") pod "c35200a2-6f14-4b98-b227-d93f103b9d76" (UID: "c35200a2-6f14-4b98-b227-d93f103b9d76"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:15:42 crc kubenswrapper[4701]: I1121 19:15:42.359524 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c35200a2-6f14-4b98-b227-d93f103b9d76-kube-api-access-xhclr" (OuterVolumeSpecName: "kube-api-access-xhclr") pod "c35200a2-6f14-4b98-b227-d93f103b9d76" (UID: "c35200a2-6f14-4b98-b227-d93f103b9d76"). InnerVolumeSpecName "kube-api-access-xhclr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:15:42 crc kubenswrapper[4701]: I1121 19:15:42.370856 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c35200a2-6f14-4b98-b227-d93f103b9d76-util" (OuterVolumeSpecName: "util") pod "c35200a2-6f14-4b98-b227-d93f103b9d76" (UID: "c35200a2-6f14-4b98-b227-d93f103b9d76"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:15:42 crc kubenswrapper[4701]: I1121 19:15:42.451604 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xhclr\" (UniqueName: \"kubernetes.io/projected/c35200a2-6f14-4b98-b227-d93f103b9d76-kube-api-access-xhclr\") on node \"crc\" DevicePath \"\"" Nov 21 19:15:42 crc kubenswrapper[4701]: I1121 19:15:42.451652 4701 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c35200a2-6f14-4b98-b227-d93f103b9d76-util\") on node \"crc\" DevicePath \"\"" Nov 21 19:15:42 crc kubenswrapper[4701]: I1121 19:15:42.451667 4701 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c35200a2-6f14-4b98-b227-d93f103b9d76-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:15:42 crc kubenswrapper[4701]: I1121 19:15:42.927705 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98" event={"ID":"c35200a2-6f14-4b98-b227-d93f103b9d76","Type":"ContainerDied","Data":"b2c1c3fbee1ee79b70920579bfc5a2029822cd86b4b5829b5bdfdffcd3434e51"} Nov 21 19:15:42 crc kubenswrapper[4701]: I1121 19:15:42.928231 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b2c1c3fbee1ee79b70920579bfc5a2029822cd86b4b5829b5bdfdffcd3434e51" Nov 21 19:15:42 crc kubenswrapper[4701]: I1121 19:15:42.927767 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98" Nov 21 19:15:44 crc kubenswrapper[4701]: I1121 19:15:44.338564 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-nv4c8"] Nov 21 19:15:44 crc kubenswrapper[4701]: I1121 19:15:44.338960 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-nv4c8" podUID="c744748b-c844-40e1-a1a8-7135a0f2c90d" containerName="registry-server" containerID="cri-o://2f59e813d721ca0295aa49baa16f4df8025bfedb709b40a5595534dbd314f8c8" gracePeriod=2 Nov 21 19:15:44 crc kubenswrapper[4701]: I1121 19:15:44.888936 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-nv4c8" Nov 21 19:15:44 crc kubenswrapper[4701]: I1121 19:15:44.949291 4701 generic.go:334] "Generic (PLEG): container finished" podID="c744748b-c844-40e1-a1a8-7135a0f2c90d" containerID="2f59e813d721ca0295aa49baa16f4df8025bfedb709b40a5595534dbd314f8c8" exitCode=0 Nov 21 19:15:44 crc kubenswrapper[4701]: I1121 19:15:44.949364 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nv4c8" event={"ID":"c744748b-c844-40e1-a1a8-7135a0f2c90d","Type":"ContainerDied","Data":"2f59e813d721ca0295aa49baa16f4df8025bfedb709b40a5595534dbd314f8c8"} Nov 21 19:15:44 crc kubenswrapper[4701]: I1121 19:15:44.949414 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-nv4c8" Nov 21 19:15:44 crc kubenswrapper[4701]: I1121 19:15:44.949436 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nv4c8" event={"ID":"c744748b-c844-40e1-a1a8-7135a0f2c90d","Type":"ContainerDied","Data":"9020224505d2d0f4833b1c9539a80d147f40224d1ae85bfd78b373851a72c9cf"} Nov 21 19:15:44 crc kubenswrapper[4701]: I1121 19:15:44.949457 4701 scope.go:117] "RemoveContainer" containerID="2f59e813d721ca0295aa49baa16f4df8025bfedb709b40a5595534dbd314f8c8" Nov 21 19:15:44 crc kubenswrapper[4701]: I1121 19:15:44.972664 4701 scope.go:117] "RemoveContainer" containerID="7798a47e4552ab68d748f3f6f4ef42b793fb105b16fcb6a828db8fc5c73d7eaf" Nov 21 19:15:44 crc kubenswrapper[4701]: I1121 19:15:44.993237 4701 scope.go:117] "RemoveContainer" containerID="5b4097b4ca0b6a63ae50dc34dcc1f904635b5447861622519d1f8fb4c673d3f1" Nov 21 19:15:45 crc kubenswrapper[4701]: I1121 19:15:45.008332 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r59dd\" (UniqueName: \"kubernetes.io/projected/c744748b-c844-40e1-a1a8-7135a0f2c90d-kube-api-access-r59dd\") pod \"c744748b-c844-40e1-a1a8-7135a0f2c90d\" (UID: \"c744748b-c844-40e1-a1a8-7135a0f2c90d\") " Nov 21 19:15:45 crc kubenswrapper[4701]: I1121 19:15:45.008581 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c744748b-c844-40e1-a1a8-7135a0f2c90d-catalog-content\") pod \"c744748b-c844-40e1-a1a8-7135a0f2c90d\" (UID: \"c744748b-c844-40e1-a1a8-7135a0f2c90d\") " Nov 21 19:15:45 crc kubenswrapper[4701]: I1121 19:15:45.008650 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c744748b-c844-40e1-a1a8-7135a0f2c90d-utilities\") pod \"c744748b-c844-40e1-a1a8-7135a0f2c90d\" (UID: \"c744748b-c844-40e1-a1a8-7135a0f2c90d\") " Nov 21 19:15:45 crc kubenswrapper[4701]: I1121 19:15:45.010014 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c744748b-c844-40e1-a1a8-7135a0f2c90d-utilities" (OuterVolumeSpecName: "utilities") pod "c744748b-c844-40e1-a1a8-7135a0f2c90d" (UID: "c744748b-c844-40e1-a1a8-7135a0f2c90d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:15:45 crc kubenswrapper[4701]: I1121 19:15:45.018462 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c744748b-c844-40e1-a1a8-7135a0f2c90d-kube-api-access-r59dd" (OuterVolumeSpecName: "kube-api-access-r59dd") pod "c744748b-c844-40e1-a1a8-7135a0f2c90d" (UID: "c744748b-c844-40e1-a1a8-7135a0f2c90d"). InnerVolumeSpecName "kube-api-access-r59dd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:15:45 crc kubenswrapper[4701]: I1121 19:15:45.026594 4701 scope.go:117] "RemoveContainer" containerID="2f59e813d721ca0295aa49baa16f4df8025bfedb709b40a5595534dbd314f8c8" Nov 21 19:15:45 crc kubenswrapper[4701]: E1121 19:15:45.027430 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2f59e813d721ca0295aa49baa16f4df8025bfedb709b40a5595534dbd314f8c8\": container with ID starting with 2f59e813d721ca0295aa49baa16f4df8025bfedb709b40a5595534dbd314f8c8 not found: ID does not exist" containerID="2f59e813d721ca0295aa49baa16f4df8025bfedb709b40a5595534dbd314f8c8" Nov 21 19:15:45 crc kubenswrapper[4701]: I1121 19:15:45.027473 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f59e813d721ca0295aa49baa16f4df8025bfedb709b40a5595534dbd314f8c8"} err="failed to get container status \"2f59e813d721ca0295aa49baa16f4df8025bfedb709b40a5595534dbd314f8c8\": rpc error: code = NotFound desc = could not find container \"2f59e813d721ca0295aa49baa16f4df8025bfedb709b40a5595534dbd314f8c8\": container with ID starting with 2f59e813d721ca0295aa49baa16f4df8025bfedb709b40a5595534dbd314f8c8 not found: ID does not exist" Nov 21 19:15:45 crc kubenswrapper[4701]: I1121 19:15:45.027500 4701 scope.go:117] "RemoveContainer" containerID="7798a47e4552ab68d748f3f6f4ef42b793fb105b16fcb6a828db8fc5c73d7eaf" Nov 21 19:15:45 crc kubenswrapper[4701]: E1121 19:15:45.027847 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7798a47e4552ab68d748f3f6f4ef42b793fb105b16fcb6a828db8fc5c73d7eaf\": container with ID starting with 7798a47e4552ab68d748f3f6f4ef42b793fb105b16fcb6a828db8fc5c73d7eaf not found: ID does not exist" containerID="7798a47e4552ab68d748f3f6f4ef42b793fb105b16fcb6a828db8fc5c73d7eaf" Nov 21 19:15:45 crc kubenswrapper[4701]: I1121 19:15:45.027907 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7798a47e4552ab68d748f3f6f4ef42b793fb105b16fcb6a828db8fc5c73d7eaf"} err="failed to get container status \"7798a47e4552ab68d748f3f6f4ef42b793fb105b16fcb6a828db8fc5c73d7eaf\": rpc error: code = NotFound desc = could not find container \"7798a47e4552ab68d748f3f6f4ef42b793fb105b16fcb6a828db8fc5c73d7eaf\": container with ID starting with 7798a47e4552ab68d748f3f6f4ef42b793fb105b16fcb6a828db8fc5c73d7eaf not found: ID does not exist" Nov 21 19:15:45 crc kubenswrapper[4701]: I1121 19:15:45.027949 4701 scope.go:117] "RemoveContainer" containerID="5b4097b4ca0b6a63ae50dc34dcc1f904635b5447861622519d1f8fb4c673d3f1" Nov 21 19:15:45 crc kubenswrapper[4701]: E1121 19:15:45.028348 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5b4097b4ca0b6a63ae50dc34dcc1f904635b5447861622519d1f8fb4c673d3f1\": container with ID starting with 5b4097b4ca0b6a63ae50dc34dcc1f904635b5447861622519d1f8fb4c673d3f1 not found: ID does not exist" containerID="5b4097b4ca0b6a63ae50dc34dcc1f904635b5447861622519d1f8fb4c673d3f1" Nov 21 19:15:45 crc kubenswrapper[4701]: I1121 19:15:45.028380 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5b4097b4ca0b6a63ae50dc34dcc1f904635b5447861622519d1f8fb4c673d3f1"} err="failed to get container status \"5b4097b4ca0b6a63ae50dc34dcc1f904635b5447861622519d1f8fb4c673d3f1\": rpc error: code = NotFound desc = could not 
find container \"5b4097b4ca0b6a63ae50dc34dcc1f904635b5447861622519d1f8fb4c673d3f1\": container with ID starting with 5b4097b4ca0b6a63ae50dc34dcc1f904635b5447861622519d1f8fb4c673d3f1 not found: ID does not exist" Nov 21 19:15:45 crc kubenswrapper[4701]: I1121 19:15:45.110164 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r59dd\" (UniqueName: \"kubernetes.io/projected/c744748b-c844-40e1-a1a8-7135a0f2c90d-kube-api-access-r59dd\") on node \"crc\" DevicePath \"\"" Nov 21 19:15:45 crc kubenswrapper[4701]: I1121 19:15:45.110218 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c744748b-c844-40e1-a1a8-7135a0f2c90d-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 19:15:45 crc kubenswrapper[4701]: I1121 19:15:45.324788 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c744748b-c844-40e1-a1a8-7135a0f2c90d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c744748b-c844-40e1-a1a8-7135a0f2c90d" (UID: "c744748b-c844-40e1-a1a8-7135a0f2c90d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:15:45 crc kubenswrapper[4701]: I1121 19:15:45.414582 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c744748b-c844-40e1-a1a8-7135a0f2c90d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 19:15:45 crc kubenswrapper[4701]: I1121 19:15:45.590737 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-nv4c8"] Nov 21 19:15:45 crc kubenswrapper[4701]: I1121 19:15:45.595163 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-nv4c8"] Nov 21 19:15:45 crc kubenswrapper[4701]: I1121 19:15:45.963866 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c744748b-c844-40e1-a1a8-7135a0f2c90d" path="/var/lib/kubelet/pods/c744748b-c844-40e1-a1a8-7135a0f2c90d/volumes" Nov 21 19:15:46 crc kubenswrapper[4701]: I1121 19:15:46.034470 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-6f8fb57dc8-vmnj6"] Nov 21 19:15:46 crc kubenswrapper[4701]: E1121 19:15:46.034771 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c744748b-c844-40e1-a1a8-7135a0f2c90d" containerName="extract-utilities" Nov 21 19:15:46 crc kubenswrapper[4701]: I1121 19:15:46.034787 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="c744748b-c844-40e1-a1a8-7135a0f2c90d" containerName="extract-utilities" Nov 21 19:15:46 crc kubenswrapper[4701]: E1121 19:15:46.034803 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c744748b-c844-40e1-a1a8-7135a0f2c90d" containerName="registry-server" Nov 21 19:15:46 crc kubenswrapper[4701]: I1121 19:15:46.034811 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="c744748b-c844-40e1-a1a8-7135a0f2c90d" containerName="registry-server" Nov 21 19:15:46 crc kubenswrapper[4701]: E1121 19:15:46.034818 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c35200a2-6f14-4b98-b227-d93f103b9d76" containerName="extract" Nov 21 19:15:46 crc kubenswrapper[4701]: I1121 19:15:46.034826 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="c35200a2-6f14-4b98-b227-d93f103b9d76" containerName="extract" Nov 21 19:15:46 crc kubenswrapper[4701]: E1121 19:15:46.034838 4701 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="c35200a2-6f14-4b98-b227-d93f103b9d76" containerName="util" Nov 21 19:15:46 crc kubenswrapper[4701]: I1121 19:15:46.034844 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="c35200a2-6f14-4b98-b227-d93f103b9d76" containerName="util" Nov 21 19:15:46 crc kubenswrapper[4701]: E1121 19:15:46.034856 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c744748b-c844-40e1-a1a8-7135a0f2c90d" containerName="extract-content" Nov 21 19:15:46 crc kubenswrapper[4701]: I1121 19:15:46.034862 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="c744748b-c844-40e1-a1a8-7135a0f2c90d" containerName="extract-content" Nov 21 19:15:46 crc kubenswrapper[4701]: E1121 19:15:46.034870 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c35200a2-6f14-4b98-b227-d93f103b9d76" containerName="pull" Nov 21 19:15:46 crc kubenswrapper[4701]: I1121 19:15:46.034875 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="c35200a2-6f14-4b98-b227-d93f103b9d76" containerName="pull" Nov 21 19:15:46 crc kubenswrapper[4701]: I1121 19:15:46.034991 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="c35200a2-6f14-4b98-b227-d93f103b9d76" containerName="extract" Nov 21 19:15:46 crc kubenswrapper[4701]: I1121 19:15:46.035005 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="c744748b-c844-40e1-a1a8-7135a0f2c90d" containerName="registry-server" Nov 21 19:15:46 crc kubenswrapper[4701]: I1121 19:15:46.035728 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-6f8fb57dc8-vmnj6" Nov 21 19:15:46 crc kubenswrapper[4701]: I1121 19:15:46.043381 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-qn5q7" Nov 21 19:15:46 crc kubenswrapper[4701]: I1121 19:15:46.124744 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zlb7w\" (UniqueName: \"kubernetes.io/projected/fc60458d-83dd-4a11-b22d-6a8a7f5f01f6-kube-api-access-zlb7w\") pod \"openstack-operator-controller-operator-6f8fb57dc8-vmnj6\" (UID: \"fc60458d-83dd-4a11-b22d-6a8a7f5f01f6\") " pod="openstack-operators/openstack-operator-controller-operator-6f8fb57dc8-vmnj6" Nov 21 19:15:46 crc kubenswrapper[4701]: I1121 19:15:46.143189 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-6f8fb57dc8-vmnj6"] Nov 21 19:15:46 crc kubenswrapper[4701]: I1121 19:15:46.226498 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zlb7w\" (UniqueName: \"kubernetes.io/projected/fc60458d-83dd-4a11-b22d-6a8a7f5f01f6-kube-api-access-zlb7w\") pod \"openstack-operator-controller-operator-6f8fb57dc8-vmnj6\" (UID: \"fc60458d-83dd-4a11-b22d-6a8a7f5f01f6\") " pod="openstack-operators/openstack-operator-controller-operator-6f8fb57dc8-vmnj6" Nov 21 19:15:46 crc kubenswrapper[4701]: I1121 19:15:46.269171 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zlb7w\" (UniqueName: \"kubernetes.io/projected/fc60458d-83dd-4a11-b22d-6a8a7f5f01f6-kube-api-access-zlb7w\") pod \"openstack-operator-controller-operator-6f8fb57dc8-vmnj6\" (UID: \"fc60458d-83dd-4a11-b22d-6a8a7f5f01f6\") " pod="openstack-operators/openstack-operator-controller-operator-6f8fb57dc8-vmnj6" Nov 21 19:15:46 crc kubenswrapper[4701]: I1121 19:15:46.354689 4701 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-6f8fb57dc8-vmnj6" Nov 21 19:15:46 crc kubenswrapper[4701]: I1121 19:15:46.719866 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-6f8fb57dc8-vmnj6"] Nov 21 19:15:46 crc kubenswrapper[4701]: I1121 19:15:46.978596 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-6f8fb57dc8-vmnj6" event={"ID":"fc60458d-83dd-4a11-b22d-6a8a7f5f01f6","Type":"ContainerStarted","Data":"806c00e4ec27e70b96ef64da26a43a72f68dea7d03957b22327493ff5ba8cdf6"} Nov 21 19:15:52 crc kubenswrapper[4701]: I1121 19:15:52.027135 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-6f8fb57dc8-vmnj6" event={"ID":"fc60458d-83dd-4a11-b22d-6a8a7f5f01f6","Type":"ContainerStarted","Data":"ffe839a5821fbdbdd2fbdba46ff807da99165fb059ddbb8e11b72a1ec12b81b0"} Nov 21 19:15:54 crc kubenswrapper[4701]: I1121 19:15:54.049111 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-6f8fb57dc8-vmnj6" event={"ID":"fc60458d-83dd-4a11-b22d-6a8a7f5f01f6","Type":"ContainerStarted","Data":"a7d78f509f2c33ec598ed11dd288433adb633660eaa480cc8419b25c2c0e0dd3"} Nov 21 19:15:54 crc kubenswrapper[4701]: I1121 19:15:54.050006 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-6f8fb57dc8-vmnj6" Nov 21 19:15:54 crc kubenswrapper[4701]: I1121 19:15:54.108673 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-6f8fb57dc8-vmnj6" podStartSLOduration=1.245781138 podStartE2EDuration="8.108637764s" podCreationTimestamp="2025-11-21 19:15:46 +0000 UTC" firstStartedPulling="2025-11-21 19:15:46.730408637 +0000 UTC m=+837.515548674" lastFinishedPulling="2025-11-21 19:15:53.593265273 +0000 UTC m=+844.378405300" observedRunningTime="2025-11-21 19:15:54.10548852 +0000 UTC m=+844.890628587" watchObservedRunningTime="2025-11-21 19:15:54.108637764 +0000 UTC m=+844.893777831" Nov 21 19:15:56 crc kubenswrapper[4701]: I1121 19:15:56.358755 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-6f8fb57dc8-vmnj6" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.320112 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-75fb479bcc-lgvh6"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.321779 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-lgvh6" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.326945 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-gkvzg" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.329354 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6498cbf48f-2ccc7"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.350858 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-2ccc7" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.356020 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-hkjp6" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.378014 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-75fb479bcc-lgvh6"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.387464 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6498cbf48f-2ccc7"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.394461 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-767ccfd65f-qmdtp"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.396022 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-qmdtp" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.404585 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-q7pk6" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.421078 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-767ccfd65f-qmdtp"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.425703 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rbn6j\" (UniqueName: \"kubernetes.io/projected/4c5eabdd-f4f8-4180-be28-707592f6d24d-kube-api-access-rbn6j\") pod \"barbican-operator-controller-manager-75fb479bcc-lgvh6\" (UID: \"4c5eabdd-f4f8-4180-be28-707592f6d24d\") " pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-lgvh6" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.425887 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bqk8h\" (UniqueName: \"kubernetes.io/projected/87969819-3a91-4333-9585-72a2a27fa6c9-kube-api-access-bqk8h\") pod \"designate-operator-controller-manager-767ccfd65f-qmdtp\" (UID: \"87969819-3a91-4333-9585-72a2a27fa6c9\") " pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-qmdtp" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.426032 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzz72\" (UniqueName: \"kubernetes.io/projected/566d8e82-b230-492d-a47b-80d2351b169e-kube-api-access-rzz72\") pod \"cinder-operator-controller-manager-6498cbf48f-2ccc7\" (UID: \"566d8e82-b230-492d-a47b-80d2351b169e\") " pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-2ccc7" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.436388 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-7969689c84-t6hz7"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.437828 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-7969689c84-t6hz7" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.442358 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-7969689c84-t6hz7"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.442937 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-j584p" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.455076 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-56f54d6746-c2mhg"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.461834 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-c2mhg" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.468141 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-598f69df5d-kkz2m"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.469690 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-kkz2m" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.474064 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-56f54d6746-c2mhg"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.474976 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-5jvpd" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.483032 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-t5mmr" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.486630 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-598f69df5d-kkz2m"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.492766 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-6dd8864d7c-vbqvb"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.494485 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-vbqvb" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.497242 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-6hqlg" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.497516 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.522396 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-99b499f4-gg7tq"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.524056 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-gg7tq" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.527009 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vc6n5\" (UniqueName: \"kubernetes.io/projected/0c6d96e4-2798-4525-bcec-61ad137140d8-kube-api-access-vc6n5\") pod \"glance-operator-controller-manager-7969689c84-t6hz7\" (UID: \"0c6d96e4-2798-4525-bcec-61ad137140d8\") " pod="openstack-operators/glance-operator-controller-manager-7969689c84-t6hz7" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.527052 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzz72\" (UniqueName: \"kubernetes.io/projected/566d8e82-b230-492d-a47b-80d2351b169e-kube-api-access-rzz72\") pod \"cinder-operator-controller-manager-6498cbf48f-2ccc7\" (UID: \"566d8e82-b230-492d-a47b-80d2351b169e\") " pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-2ccc7" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.527089 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z4w9f\" (UniqueName: \"kubernetes.io/projected/ef203e45-f1b1-4a9a-9987-66bb33655a95-kube-api-access-z4w9f\") pod \"heat-operator-controller-manager-56f54d6746-c2mhg\" (UID: \"ef203e45-f1b1-4a9a-9987-66bb33655a95\") " pod="openstack-operators/heat-operator-controller-manager-56f54d6746-c2mhg" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.527122 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rbn6j\" (UniqueName: \"kubernetes.io/projected/4c5eabdd-f4f8-4180-be28-707592f6d24d-kube-api-access-rbn6j\") pod \"barbican-operator-controller-manager-75fb479bcc-lgvh6\" (UID: \"4c5eabdd-f4f8-4180-be28-707592f6d24d\") " pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-lgvh6" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.527491 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-z29xq" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.527755 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rvc4p\" (UniqueName: \"kubernetes.io/projected/a76b2214-2c16-4b55-bf3d-c7bdf1019237-kube-api-access-rvc4p\") pod \"horizon-operator-controller-manager-598f69df5d-kkz2m\" (UID: \"a76b2214-2c16-4b55-bf3d-c7bdf1019237\") " pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-kkz2m" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.527793 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bqk8h\" (UniqueName: \"kubernetes.io/projected/87969819-3a91-4333-9585-72a2a27fa6c9-kube-api-access-bqk8h\") pod \"designate-operator-controller-manager-767ccfd65f-qmdtp\" (UID: \"87969819-3a91-4333-9585-72a2a27fa6c9\") " pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-qmdtp" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.527827 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jsnpc\" (UniqueName: \"kubernetes.io/projected/b15963ff-1822-4079-8cce-266b05a9ac47-kube-api-access-jsnpc\") pod \"infra-operator-controller-manager-6dd8864d7c-vbqvb\" (UID: \"b15963ff-1822-4079-8cce-266b05a9ac47\") " 
pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-vbqvb" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.527856 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b15963ff-1822-4079-8cce-266b05a9ac47-cert\") pod \"infra-operator-controller-manager-6dd8864d7c-vbqvb\" (UID: \"b15963ff-1822-4079-8cce-266b05a9ac47\") " pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-vbqvb" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.537291 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-6dd8864d7c-vbqvb"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.549013 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7454b96578-hz5sk"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.550404 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-hz5sk" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.557589 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-97vcm" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.557778 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-99b499f4-gg7tq"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.574573 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzz72\" (UniqueName: \"kubernetes.io/projected/566d8e82-b230-492d-a47b-80d2351b169e-kube-api-access-rzz72\") pod \"cinder-operator-controller-manager-6498cbf48f-2ccc7\" (UID: \"566d8e82-b230-492d-a47b-80d2351b169e\") " pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-2ccc7" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.574658 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7454b96578-hz5sk"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.586618 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-58f887965d-467lr"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.588022 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-58f887965d-467lr" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.591589 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bqk8h\" (UniqueName: \"kubernetes.io/projected/87969819-3a91-4333-9585-72a2a27fa6c9-kube-api-access-bqk8h\") pod \"designate-operator-controller-manager-767ccfd65f-qmdtp\" (UID: \"87969819-3a91-4333-9585-72a2a27fa6c9\") " pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-qmdtp" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.592059 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-xmnpt" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.596796 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rbn6j\" (UniqueName: \"kubernetes.io/projected/4c5eabdd-f4f8-4180-be28-707592f6d24d-kube-api-access-rbn6j\") pod \"barbican-operator-controller-manager-75fb479bcc-lgvh6\" (UID: \"4c5eabdd-f4f8-4180-be28-707592f6d24d\") " pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-lgvh6" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.600687 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-58f887965d-467lr"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.622695 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-54b5986bb8-rlq95"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.625974 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-rlq95" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.628653 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-54b5986bb8-rlq95"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.628875 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zctgj\" (UniqueName: \"kubernetes.io/projected/61444bc1-a24a-4c29-94b8-953ae2dc8621-kube-api-access-zctgj\") pod \"keystone-operator-controller-manager-7454b96578-hz5sk\" (UID: \"61444bc1-a24a-4c29-94b8-953ae2dc8621\") " pod="openstack-operators/keystone-operator-controller-manager-7454b96578-hz5sk" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.628934 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pmsz2\" (UniqueName: \"kubernetes.io/projected/c7b87a42-0af4-4484-845e-f2993960537c-kube-api-access-pmsz2\") pod \"ironic-operator-controller-manager-99b499f4-gg7tq\" (UID: \"c7b87a42-0af4-4484-845e-f2993960537c\") " pod="openstack-operators/ironic-operator-controller-manager-99b499f4-gg7tq" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.628980 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rvc4p\" (UniqueName: \"kubernetes.io/projected/a76b2214-2c16-4b55-bf3d-c7bdf1019237-kube-api-access-rvc4p\") pod \"horizon-operator-controller-manager-598f69df5d-kkz2m\" (UID: \"a76b2214-2c16-4b55-bf3d-c7bdf1019237\") " pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-kkz2m" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.629013 4701 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-jsnpc\" (UniqueName: \"kubernetes.io/projected/b15963ff-1822-4079-8cce-266b05a9ac47-kube-api-access-jsnpc\") pod \"infra-operator-controller-manager-6dd8864d7c-vbqvb\" (UID: \"b15963ff-1822-4079-8cce-266b05a9ac47\") " pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-vbqvb" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.629087 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b15963ff-1822-4079-8cce-266b05a9ac47-cert\") pod \"infra-operator-controller-manager-6dd8864d7c-vbqvb\" (UID: \"b15963ff-1822-4079-8cce-266b05a9ac47\") " pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-vbqvb" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.629178 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vc6n5\" (UniqueName: \"kubernetes.io/projected/0c6d96e4-2798-4525-bcec-61ad137140d8-kube-api-access-vc6n5\") pod \"glance-operator-controller-manager-7969689c84-t6hz7\" (UID: \"0c6d96e4-2798-4525-bcec-61ad137140d8\") " pod="openstack-operators/glance-operator-controller-manager-7969689c84-t6hz7" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.629311 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z4w9f\" (UniqueName: \"kubernetes.io/projected/ef203e45-f1b1-4a9a-9987-66bb33655a95-kube-api-access-z4w9f\") pod \"heat-operator-controller-manager-56f54d6746-c2mhg\" (UID: \"ef203e45-f1b1-4a9a-9987-66bb33655a95\") " pod="openstack-operators/heat-operator-controller-manager-56f54d6746-c2mhg" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.629419 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4gccf\" (UniqueName: \"kubernetes.io/projected/bcf3ee80-4bca-445a-84aa-ef30d99b7b9a-kube-api-access-4gccf\") pod \"manila-operator-controller-manager-58f887965d-467lr\" (UID: \"bcf3ee80-4bca-445a-84aa-ef30d99b7b9a\") " pod="openstack-operators/manila-operator-controller-manager-58f887965d-467lr" Nov 21 19:16:14 crc kubenswrapper[4701]: E1121 19:16:14.629622 4701 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 21 19:16:14 crc kubenswrapper[4701]: E1121 19:16:14.629744 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b15963ff-1822-4079-8cce-266b05a9ac47-cert podName:b15963ff-1822-4079-8cce-266b05a9ac47 nodeName:}" failed. No retries permitted until 2025-11-21 19:16:15.129719315 +0000 UTC m=+865.914859342 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b15963ff-1822-4079-8cce-266b05a9ac47-cert") pod "infra-operator-controller-manager-6dd8864d7c-vbqvb" (UID: "b15963ff-1822-4079-8cce-266b05a9ac47") : secret "infra-operator-webhook-server-cert" not found Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.635007 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-78bd47f458-rzvnf"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.636328 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-rzvnf" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.641289 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-cfbb9c588-zrwsd"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.642687 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-swzlv" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.642845 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-lvqcx" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.644456 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-zrwsd" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.653235 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-78bd47f458-rzvnf"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.657967 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-rqz62" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.664312 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-lgvh6" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.664541 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-cfbb9c588-zrwsd"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.671806 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-qqmcw"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.672620 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z4w9f\" (UniqueName: \"kubernetes.io/projected/ef203e45-f1b1-4a9a-9987-66bb33655a95-kube-api-access-z4w9f\") pod \"heat-operator-controller-manager-56f54d6746-c2mhg\" (UID: \"ef203e45-f1b1-4a9a-9987-66bb33655a95\") " pod="openstack-operators/heat-operator-controller-manager-56f54d6746-c2mhg" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.673072 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-qqmcw" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.673867 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vc6n5\" (UniqueName: \"kubernetes.io/projected/0c6d96e4-2798-4525-bcec-61ad137140d8-kube-api-access-vc6n5\") pod \"glance-operator-controller-manager-7969689c84-t6hz7\" (UID: \"0c6d96e4-2798-4525-bcec-61ad137140d8\") " pod="openstack-operators/glance-operator-controller-manager-7969689c84-t6hz7" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.674755 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-wbn6b" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.683208 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jsnpc\" (UniqueName: \"kubernetes.io/projected/b15963ff-1822-4079-8cce-266b05a9ac47-kube-api-access-jsnpc\") pod \"infra-operator-controller-manager-6dd8864d7c-vbqvb\" (UID: \"b15963ff-1822-4079-8cce-266b05a9ac47\") " pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-vbqvb" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.687364 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-qqmcw"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.691785 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-2ccc7" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.701047 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rvc4p\" (UniqueName: \"kubernetes.io/projected/a76b2214-2c16-4b55-bf3d-c7bdf1019237-kube-api-access-rvc4p\") pod \"horizon-operator-controller-manager-598f69df5d-kkz2m\" (UID: \"a76b2214-2c16-4b55-bf3d-c7bdf1019237\") " pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-kkz2m" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.734137 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4gccf\" (UniqueName: \"kubernetes.io/projected/bcf3ee80-4bca-445a-84aa-ef30d99b7b9a-kube-api-access-4gccf\") pod \"manila-operator-controller-manager-58f887965d-467lr\" (UID: \"bcf3ee80-4bca-445a-84aa-ef30d99b7b9a\") " pod="openstack-operators/manila-operator-controller-manager-58f887965d-467lr" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.734640 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zctgj\" (UniqueName: \"kubernetes.io/projected/61444bc1-a24a-4c29-94b8-953ae2dc8621-kube-api-access-zctgj\") pod \"keystone-operator-controller-manager-7454b96578-hz5sk\" (UID: \"61444bc1-a24a-4c29-94b8-953ae2dc8621\") " pod="openstack-operators/keystone-operator-controller-manager-7454b96578-hz5sk" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.734666 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pmsz2\" (UniqueName: \"kubernetes.io/projected/c7b87a42-0af4-4484-845e-f2993960537c-kube-api-access-pmsz2\") pod \"ironic-operator-controller-manager-99b499f4-gg7tq\" (UID: \"c7b87a42-0af4-4484-845e-f2993960537c\") " pod="openstack-operators/ironic-operator-controller-manager-99b499f4-gg7tq" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.734691 4701 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qd8vm\" (UniqueName: \"kubernetes.io/projected/015395c6-297a-4a90-a5fd-49dcdde237af-kube-api-access-qd8vm\") pod \"octavia-operator-controller-manager-54cfbf4c7d-qqmcw\" (UID: \"015395c6-297a-4a90-a5fd-49dcdde237af\") " pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-qqmcw" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.734735 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x8jsl\" (UniqueName: \"kubernetes.io/projected/1440b54d-d3f5-46a9-b335-27a6d2031d24-kube-api-access-x8jsl\") pod \"mariadb-operator-controller-manager-54b5986bb8-rlq95\" (UID: \"1440b54d-d3f5-46a9-b335-27a6d2031d24\") " pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-rlq95" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.734782 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jjzkc\" (UniqueName: \"kubernetes.io/projected/8c7ae04c-6e93-4c37-b1e5-8bbcbe9ffa2d-kube-api-access-jjzkc\") pod \"neutron-operator-controller-manager-78bd47f458-rzvnf\" (UID: \"8c7ae04c-6e93-4c37-b1e5-8bbcbe9ffa2d\") " pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-rzvnf" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.734825 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5m9rb\" (UniqueName: \"kubernetes.io/projected/21028817-64c6-4a7c-8427-8ee3db1dec7b-kube-api-access-5m9rb\") pod \"nova-operator-controller-manager-cfbb9c588-zrwsd\" (UID: \"21028817-64c6-4a7c-8427-8ee3db1dec7b\") " pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-zrwsd" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.730988 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-qmdtp" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.776408 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4gccf\" (UniqueName: \"kubernetes.io/projected/bcf3ee80-4bca-445a-84aa-ef30d99b7b9a-kube-api-access-4gccf\") pod \"manila-operator-controller-manager-58f887965d-467lr\" (UID: \"bcf3ee80-4bca-445a-84aa-ef30d99b7b9a\") " pod="openstack-operators/manila-operator-controller-manager-58f887965d-467lr" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.777673 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-7969689c84-t6hz7" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.778126 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-54fc5f65b7-vmsdw"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.782039 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zctgj\" (UniqueName: \"kubernetes.io/projected/61444bc1-a24a-4c29-94b8-953ae2dc8621-kube-api-access-zctgj\") pod \"keystone-operator-controller-manager-7454b96578-hz5sk\" (UID: \"61444bc1-a24a-4c29-94b8-953ae2dc8621\") " pod="openstack-operators/keystone-operator-controller-manager-7454b96578-hz5sk" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.782585 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pmsz2\" (UniqueName: \"kubernetes.io/projected/c7b87a42-0af4-4484-845e-f2993960537c-kube-api-access-pmsz2\") pod \"ironic-operator-controller-manager-99b499f4-gg7tq\" (UID: \"c7b87a42-0af4-4484-845e-f2993960537c\") " pod="openstack-operators/ironic-operator-controller-manager-99b499f4-gg7tq" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.784894 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-vmsdw" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.787515 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-4btpr" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.811383 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-c2mhg" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.828058 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-kkz2m" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.830514 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-54fc5f65b7-vmsdw"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.835641 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x8jsl\" (UniqueName: \"kubernetes.io/projected/1440b54d-d3f5-46a9-b335-27a6d2031d24-kube-api-access-x8jsl\") pod \"mariadb-operator-controller-manager-54b5986bb8-rlq95\" (UID: \"1440b54d-d3f5-46a9-b335-27a6d2031d24\") " pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-rlq95" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.835714 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jjzkc\" (UniqueName: \"kubernetes.io/projected/8c7ae04c-6e93-4c37-b1e5-8bbcbe9ffa2d-kube-api-access-jjzkc\") pod \"neutron-operator-controller-manager-78bd47f458-rzvnf\" (UID: \"8c7ae04c-6e93-4c37-b1e5-8bbcbe9ffa2d\") " pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-rzvnf" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.835755 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5m9rb\" (UniqueName: \"kubernetes.io/projected/21028817-64c6-4a7c-8427-8ee3db1dec7b-kube-api-access-5m9rb\") pod \"nova-operator-controller-manager-cfbb9c588-zrwsd\" (UID: \"21028817-64c6-4a7c-8427-8ee3db1dec7b\") " pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-zrwsd" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.835805 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qd8vm\" (UniqueName: \"kubernetes.io/projected/015395c6-297a-4a90-a5fd-49dcdde237af-kube-api-access-qd8vm\") pod \"octavia-operator-controller-manager-54cfbf4c7d-qqmcw\" (UID: \"015395c6-297a-4a90-a5fd-49dcdde237af\") " pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-qqmcw" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.835829 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pg5jt\" (UniqueName: \"kubernetes.io/projected/9a58290e-d37e-4094-8ed8-4ed701c1292c-kube-api-access-pg5jt\") pod \"ovn-operator-controller-manager-54fc5f65b7-vmsdw\" (UID: \"9a58290e-d37e-4094-8ed8-4ed701c1292c\") " pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-vmsdw" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.848567 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-c58b4"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.852104 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-c58b4" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.854404 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-5bkfx" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.854651 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.862564 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-c58b4"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.865053 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-gg7tq" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.868145 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5m9rb\" (UniqueName: \"kubernetes.io/projected/21028817-64c6-4a7c-8427-8ee3db1dec7b-kube-api-access-5m9rb\") pod \"nova-operator-controller-manager-cfbb9c588-zrwsd\" (UID: \"21028817-64c6-4a7c-8427-8ee3db1dec7b\") " pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-zrwsd" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.871306 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jjzkc\" (UniqueName: \"kubernetes.io/projected/8c7ae04c-6e93-4c37-b1e5-8bbcbe9ffa2d-kube-api-access-jjzkc\") pod \"neutron-operator-controller-manager-78bd47f458-rzvnf\" (UID: \"8c7ae04c-6e93-4c37-b1e5-8bbcbe9ffa2d\") " pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-rzvnf" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.871373 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qd8vm\" (UniqueName: \"kubernetes.io/projected/015395c6-297a-4a90-a5fd-49dcdde237af-kube-api-access-qd8vm\") pod \"octavia-operator-controller-manager-54cfbf4c7d-qqmcw\" (UID: \"015395c6-297a-4a90-a5fd-49dcdde237af\") " pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-qqmcw" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.871915 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x8jsl\" (UniqueName: \"kubernetes.io/projected/1440b54d-d3f5-46a9-b335-27a6d2031d24-kube-api-access-x8jsl\") pod \"mariadb-operator-controller-manager-54b5986bb8-rlq95\" (UID: \"1440b54d-d3f5-46a9-b335-27a6d2031d24\") " pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-rlq95" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.876429 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-qqmcw" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.882651 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-5b797b8dff-96954"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.891067 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-96954" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.890742 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-d656998f4-pwg6n"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.898568 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-6d2f7"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.900076 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-qdk6g" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.898774 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-d656998f4-pwg6n" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.907883 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-6d2f7" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.911766 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-6d2f7"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.911989 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-lsztw" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.914321 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-hpzcn" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.926434 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5b797b8dff-96954"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.928238 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-d656998f4-pwg6n"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.937437 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qzxkl\" (UniqueName: \"kubernetes.io/projected/565f6d5b-92e9-4fc5-9c4b-9c06b8946754-kube-api-access-qzxkl\") pod \"swift-operator-controller-manager-d656998f4-pwg6n\" (UID: \"565f6d5b-92e9-4fc5-9c4b-9c06b8946754\") " pod="openstack-operators/swift-operator-controller-manager-d656998f4-pwg6n" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.937729 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tppvh\" (UniqueName: \"kubernetes.io/projected/e2fc7504-afe1-4197-a366-c765c52366b0-kube-api-access-tppvh\") pod \"placement-operator-controller-manager-5b797b8dff-96954\" (UID: \"e2fc7504-afe1-4197-a366-c765c52366b0\") " pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-96954" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.939751 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pg5jt\" (UniqueName: \"kubernetes.io/projected/9a58290e-d37e-4094-8ed8-4ed701c1292c-kube-api-access-pg5jt\") pod \"ovn-operator-controller-manager-54fc5f65b7-vmsdw\" (UID: \"9a58290e-d37e-4094-8ed8-4ed701c1292c\") " 
pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-vmsdw" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.939878 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nhbt4\" (UniqueName: \"kubernetes.io/projected/0439f0bf-0ea9-4553-a53c-74f87b31a6a7-kube-api-access-nhbt4\") pod \"openstack-baremetal-operator-controller-manager-8c7444f48-c58b4\" (UID: \"0439f0bf-0ea9-4553-a53c-74f87b31a6a7\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-c58b4" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.940063 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kmsh5\" (UniqueName: \"kubernetes.io/projected/66d77e65-ca72-473d-9697-9168a951b0c9-kube-api-access-kmsh5\") pod \"telemetry-operator-controller-manager-6d4bf84b58-6d2f7\" (UID: \"66d77e65-ca72-473d-9697-9168a951b0c9\") " pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-6d2f7" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.940319 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0439f0bf-0ea9-4553-a53c-74f87b31a6a7-cert\") pod \"openstack-baremetal-operator-controller-manager-8c7444f48-c58b4\" (UID: \"0439f0bf-0ea9-4553-a53c-74f87b31a6a7\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-c58b4" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.942908 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-hz5sk" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.950742 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-b4c496f69-9rtf7"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.952357 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-b4c496f69-9rtf7" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.957782 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-cf7dv" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.958033 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-b4c496f69-9rtf7"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.970287 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-5c984db885-xjww4"] Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.972260 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-5c984db885-xjww4" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.972561 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pg5jt\" (UniqueName: \"kubernetes.io/projected/9a58290e-d37e-4094-8ed8-4ed701c1292c-kube-api-access-pg5jt\") pod \"ovn-operator-controller-manager-54fc5f65b7-vmsdw\" (UID: \"9a58290e-d37e-4094-8ed8-4ed701c1292c\") " pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-vmsdw" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.977949 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-jd2j9" Nov 21 19:16:14 crc kubenswrapper[4701]: I1121 19:16:14.999183 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-5c984db885-xjww4"] Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.045183 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-58f887965d-467lr" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.048070 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nhbt4\" (UniqueName: \"kubernetes.io/projected/0439f0bf-0ea9-4553-a53c-74f87b31a6a7-kube-api-access-nhbt4\") pod \"openstack-baremetal-operator-controller-manager-8c7444f48-c58b4\" (UID: \"0439f0bf-0ea9-4553-a53c-74f87b31a6a7\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-c58b4" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.048137 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7f2bg\" (UniqueName: \"kubernetes.io/projected/b571034c-9574-4a93-80e9-abbf663e6ac3-kube-api-access-7f2bg\") pod \"watcher-operator-controller-manager-5c984db885-xjww4\" (UID: \"b571034c-9574-4a93-80e9-abbf663e6ac3\") " pod="openstack-operators/watcher-operator-controller-manager-5c984db885-xjww4" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.048167 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8br99\" (UniqueName: \"kubernetes.io/projected/1816b847-d41a-400a-bb1d-4f7551cfd581-kube-api-access-8br99\") pod \"test-operator-controller-manager-b4c496f69-9rtf7\" (UID: \"1816b847-d41a-400a-bb1d-4f7551cfd581\") " pod="openstack-operators/test-operator-controller-manager-b4c496f69-9rtf7" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.048257 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kmsh5\" (UniqueName: \"kubernetes.io/projected/66d77e65-ca72-473d-9697-9168a951b0c9-kube-api-access-kmsh5\") pod \"telemetry-operator-controller-manager-6d4bf84b58-6d2f7\" (UID: \"66d77e65-ca72-473d-9697-9168a951b0c9\") " pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-6d2f7" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.048308 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0439f0bf-0ea9-4553-a53c-74f87b31a6a7-cert\") pod \"openstack-baremetal-operator-controller-manager-8c7444f48-c58b4\" (UID: \"0439f0bf-0ea9-4553-a53c-74f87b31a6a7\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-c58b4" Nov 21 19:16:15 crc 
kubenswrapper[4701]: I1121 19:16:15.048333 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qzxkl\" (UniqueName: \"kubernetes.io/projected/565f6d5b-92e9-4fc5-9c4b-9c06b8946754-kube-api-access-qzxkl\") pod \"swift-operator-controller-manager-d656998f4-pwg6n\" (UID: \"565f6d5b-92e9-4fc5-9c4b-9c06b8946754\") " pod="openstack-operators/swift-operator-controller-manager-d656998f4-pwg6n" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.048362 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tppvh\" (UniqueName: \"kubernetes.io/projected/e2fc7504-afe1-4197-a366-c765c52366b0-kube-api-access-tppvh\") pod \"placement-operator-controller-manager-5b797b8dff-96954\" (UID: \"e2fc7504-afe1-4197-a366-c765c52366b0\") " pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-96954" Nov 21 19:16:15 crc kubenswrapper[4701]: E1121 19:16:15.054017 4701 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 21 19:16:15 crc kubenswrapper[4701]: E1121 19:16:15.054085 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0439f0bf-0ea9-4553-a53c-74f87b31a6a7-cert podName:0439f0bf-0ea9-4553-a53c-74f87b31a6a7 nodeName:}" failed. No retries permitted until 2025-11-21 19:16:15.554066411 +0000 UTC m=+866.339206438 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/0439f0bf-0ea9-4553-a53c-74f87b31a6a7-cert") pod "openstack-baremetal-operator-controller-manager-8c7444f48-c58b4" (UID: "0439f0bf-0ea9-4553-a53c-74f87b31a6a7") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.082231 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-rlq95" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.103818 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tppvh\" (UniqueName: \"kubernetes.io/projected/e2fc7504-afe1-4197-a366-c765c52366b0-kube-api-access-tppvh\") pod \"placement-operator-controller-manager-5b797b8dff-96954\" (UID: \"e2fc7504-afe1-4197-a366-c765c52366b0\") " pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-96954" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.112115 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kmsh5\" (UniqueName: \"kubernetes.io/projected/66d77e65-ca72-473d-9697-9168a951b0c9-kube-api-access-kmsh5\") pod \"telemetry-operator-controller-manager-6d4bf84b58-6d2f7\" (UID: \"66d77e65-ca72-473d-9697-9168a951b0c9\") " pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-6d2f7" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.112504 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-rzvnf" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.122841 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nhbt4\" (UniqueName: \"kubernetes.io/projected/0439f0bf-0ea9-4553-a53c-74f87b31a6a7-kube-api-access-nhbt4\") pod \"openstack-baremetal-operator-controller-manager-8c7444f48-c58b4\" (UID: \"0439f0bf-0ea9-4553-a53c-74f87b31a6a7\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-c58b4" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.122798 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qzxkl\" (UniqueName: \"kubernetes.io/projected/565f6d5b-92e9-4fc5-9c4b-9c06b8946754-kube-api-access-qzxkl\") pod \"swift-operator-controller-manager-d656998f4-pwg6n\" (UID: \"565f6d5b-92e9-4fc5-9c4b-9c06b8946754\") " pod="openstack-operators/swift-operator-controller-manager-d656998f4-pwg6n" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.131336 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7467d8c866-fgkj9"] Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.136768 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-zrwsd" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.149332 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-7467d8c866-fgkj9" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.155554 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-5z5x4" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.155631 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.162751 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7f2bg\" (UniqueName: \"kubernetes.io/projected/b571034c-9574-4a93-80e9-abbf663e6ac3-kube-api-access-7f2bg\") pod \"watcher-operator-controller-manager-5c984db885-xjww4\" (UID: \"b571034c-9574-4a93-80e9-abbf663e6ac3\") " pod="openstack-operators/watcher-operator-controller-manager-5c984db885-xjww4" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.162799 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8br99\" (UniqueName: \"kubernetes.io/projected/1816b847-d41a-400a-bb1d-4f7551cfd581-kube-api-access-8br99\") pod \"test-operator-controller-manager-b4c496f69-9rtf7\" (UID: \"1816b847-d41a-400a-bb1d-4f7551cfd581\") " pod="openstack-operators/test-operator-controller-manager-b4c496f69-9rtf7" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.162837 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b15963ff-1822-4079-8cce-266b05a9ac47-cert\") pod \"infra-operator-controller-manager-6dd8864d7c-vbqvb\" (UID: \"b15963ff-1822-4079-8cce-266b05a9ac47\") " pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-vbqvb" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.173364 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack-operators/openstack-operator-controller-manager-7467d8c866-fgkj9"] Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.205995 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-vmsdw" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.206971 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8br99\" (UniqueName: \"kubernetes.io/projected/1816b847-d41a-400a-bb1d-4f7551cfd581-kube-api-access-8br99\") pod \"test-operator-controller-manager-b4c496f69-9rtf7\" (UID: \"1816b847-d41a-400a-bb1d-4f7551cfd581\") " pod="openstack-operators/test-operator-controller-manager-b4c496f69-9rtf7" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.236566 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7f2bg\" (UniqueName: \"kubernetes.io/projected/b571034c-9574-4a93-80e9-abbf663e6ac3-kube-api-access-7f2bg\") pod \"watcher-operator-controller-manager-5c984db885-xjww4\" (UID: \"b571034c-9574-4a93-80e9-abbf663e6ac3\") " pod="openstack-operators/watcher-operator-controller-manager-5c984db885-xjww4" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.248959 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b15963ff-1822-4079-8cce-266b05a9ac47-cert\") pod \"infra-operator-controller-manager-6dd8864d7c-vbqvb\" (UID: \"b15963ff-1822-4079-8cce-266b05a9ac47\") " pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-vbqvb" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.257173 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-knf78"] Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.258244 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-knf78" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.264787 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/743fbd83-3b42-4083-b06f-ae81d6294066-cert\") pod \"openstack-operator-controller-manager-7467d8c866-fgkj9\" (UID: \"743fbd83-3b42-4083-b06f-ae81d6294066\") " pod="openstack-operators/openstack-operator-controller-manager-7467d8c866-fgkj9" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.264900 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dtk4g\" (UniqueName: \"kubernetes.io/projected/743fbd83-3b42-4083-b06f-ae81d6294066-kube-api-access-dtk4g\") pod \"openstack-operator-controller-manager-7467d8c866-fgkj9\" (UID: \"743fbd83-3b42-4083-b06f-ae81d6294066\") " pod="openstack-operators/openstack-operator-controller-manager-7467d8c866-fgkj9" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.270415 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-46kzw" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.276394 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-96954" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.283095 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-knf78"] Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.314055 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-d656998f4-pwg6n" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.344979 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-6d2f7" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.369129 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dtk4g\" (UniqueName: \"kubernetes.io/projected/743fbd83-3b42-4083-b06f-ae81d6294066-kube-api-access-dtk4g\") pod \"openstack-operator-controller-manager-7467d8c866-fgkj9\" (UID: \"743fbd83-3b42-4083-b06f-ae81d6294066\") " pod="openstack-operators/openstack-operator-controller-manager-7467d8c866-fgkj9" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.369632 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7xpjh\" (UniqueName: \"kubernetes.io/projected/82428420-1129-4ce6-a969-7d54bb2f0d52-kube-api-access-7xpjh\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-knf78\" (UID: \"82428420-1129-4ce6-a969-7d54bb2f0d52\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-knf78" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.369694 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/743fbd83-3b42-4083-b06f-ae81d6294066-cert\") pod \"openstack-operator-controller-manager-7467d8c866-fgkj9\" (UID: \"743fbd83-3b42-4083-b06f-ae81d6294066\") " pod="openstack-operators/openstack-operator-controller-manager-7467d8c866-fgkj9" Nov 21 19:16:15 crc kubenswrapper[4701]: E1121 19:16:15.393036 4701 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 21 19:16:15 crc kubenswrapper[4701]: E1121 19:16:15.393168 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/743fbd83-3b42-4083-b06f-ae81d6294066-cert podName:743fbd83-3b42-4083-b06f-ae81d6294066 nodeName:}" failed. No retries permitted until 2025-11-21 19:16:15.893139424 +0000 UTC m=+866.678279451 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/743fbd83-3b42-4083-b06f-ae81d6294066-cert") pod "openstack-operator-controller-manager-7467d8c866-fgkj9" (UID: "743fbd83-3b42-4083-b06f-ae81d6294066") : secret "webhook-server-cert" not found Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.393904 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dtk4g\" (UniqueName: \"kubernetes.io/projected/743fbd83-3b42-4083-b06f-ae81d6294066-kube-api-access-dtk4g\") pod \"openstack-operator-controller-manager-7467d8c866-fgkj9\" (UID: \"743fbd83-3b42-4083-b06f-ae81d6294066\") " pod="openstack-operators/openstack-operator-controller-manager-7467d8c866-fgkj9" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.395415 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-b4c496f69-9rtf7" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.436020 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-vbqvb" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.465337 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-5c984db885-xjww4" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.494474 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7xpjh\" (UniqueName: \"kubernetes.io/projected/82428420-1129-4ce6-a969-7d54bb2f0d52-kube-api-access-7xpjh\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-knf78\" (UID: \"82428420-1129-4ce6-a969-7d54bb2f0d52\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-knf78" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.520518 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-75fb479bcc-lgvh6"] Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.532472 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7xpjh\" (UniqueName: \"kubernetes.io/projected/82428420-1129-4ce6-a969-7d54bb2f0d52-kube-api-access-7xpjh\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-knf78\" (UID: \"82428420-1129-4ce6-a969-7d54bb2f0d52\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-knf78" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.555667 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6498cbf48f-2ccc7"] Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.604689 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0439f0bf-0ea9-4553-a53c-74f87b31a6a7-cert\") pod \"openstack-baremetal-operator-controller-manager-8c7444f48-c58b4\" (UID: \"0439f0bf-0ea9-4553-a53c-74f87b31a6a7\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-c58b4" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.615714 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0439f0bf-0ea9-4553-a53c-74f87b31a6a7-cert\") pod \"openstack-baremetal-operator-controller-manager-8c7444f48-c58b4\" (UID: \"0439f0bf-0ea9-4553-a53c-74f87b31a6a7\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-c58b4" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.621345 4701 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.668774 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-knf78" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.843173 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-c58b4" Nov 21 19:16:15 crc kubenswrapper[4701]: I1121 19:16:15.912364 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/743fbd83-3b42-4083-b06f-ae81d6294066-cert\") pod \"openstack-operator-controller-manager-7467d8c866-fgkj9\" (UID: \"743fbd83-3b42-4083-b06f-ae81d6294066\") " pod="openstack-operators/openstack-operator-controller-manager-7467d8c866-fgkj9" Nov 21 19:16:15 crc kubenswrapper[4701]: E1121 19:16:15.912552 4701 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 21 19:16:15 crc kubenswrapper[4701]: E1121 19:16:15.912624 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/743fbd83-3b42-4083-b06f-ae81d6294066-cert podName:743fbd83-3b42-4083-b06f-ae81d6294066 nodeName:}" failed. No retries permitted until 2025-11-21 19:16:16.912605004 +0000 UTC m=+867.697745031 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/743fbd83-3b42-4083-b06f-ae81d6294066-cert") pod "openstack-operator-controller-manager-7467d8c866-fgkj9" (UID: "743fbd83-3b42-4083-b06f-ae81d6294066") : secret "webhook-server-cert" not found Nov 21 19:16:16 crc kubenswrapper[4701]: W1121 19:16:16.056427 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc7b87a42_0af4_4484_845e_f2993960537c.slice/crio-38a267fb1fdc50e361ba0608d250134caf446b2976f5b3bb79b762363c0f8c7b WatchSource:0}: Error finding container 38a267fb1fdc50e361ba0608d250134caf446b2976f5b3bb79b762363c0f8c7b: Status 404 returned error can't find the container with id 38a267fb1fdc50e361ba0608d250134caf446b2976f5b3bb79b762363c0f8c7b Nov 21 19:16:16 crc kubenswrapper[4701]: I1121 19:16:16.058816 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-99b499f4-gg7tq"] Nov 21 19:16:16 crc kubenswrapper[4701]: I1121 19:16:16.068758 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-598f69df5d-kkz2m"] Nov 21 19:16:16 crc kubenswrapper[4701]: I1121 19:16:16.084790 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-767ccfd65f-qmdtp"] Nov 21 19:16:16 crc kubenswrapper[4701]: I1121 19:16:16.103211 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-56f54d6746-c2mhg"] Nov 21 19:16:16 crc kubenswrapper[4701]: I1121 19:16:16.218515 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-54fc5f65b7-vmsdw"] Nov 21 19:16:16 crc kubenswrapper[4701]: I1121 19:16:16.225710 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-7969689c84-t6hz7"] Nov 21 19:16:16 crc kubenswrapper[4701]: I1121 19:16:16.294344 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7454b96578-hz5sk"] Nov 21 19:16:16 crc kubenswrapper[4701]: I1121 19:16:16.324304 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-qqmcw"] Nov 21 19:16:16 crc kubenswrapper[4701]: I1121 19:16:16.336678 4701 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-54b5986bb8-rlq95"] Nov 21 19:16:16 crc kubenswrapper[4701]: I1121 19:16:16.351785 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-vmsdw" event={"ID":"9a58290e-d37e-4094-8ed8-4ed701c1292c","Type":"ContainerStarted","Data":"e5cf8846fb32e9ba9c28b9967e2ded82425f9dc14ddcfacb07500607886ab9bc"} Nov 21 19:16:16 crc kubenswrapper[4701]: I1121 19:16:16.359448 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-2ccc7" event={"ID":"566d8e82-b230-492d-a47b-80d2351b169e","Type":"ContainerStarted","Data":"f7ff7c81372db6750b86ae72ca8d6ac10d9c2a5db9e743b7f51f80a5d64f4d8f"} Nov 21 19:16:16 crc kubenswrapper[4701]: I1121 19:16:16.367064 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-qmdtp" event={"ID":"87969819-3a91-4333-9585-72a2a27fa6c9","Type":"ContainerStarted","Data":"3e40caf8fe5e70ef1a9d03d689a74ad143815cdb1312d885629359ed9dde0c0d"} Nov 21 19:16:16 crc kubenswrapper[4701]: I1121 19:16:16.419653 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-lgvh6" event={"ID":"4c5eabdd-f4f8-4180-be28-707592f6d24d","Type":"ContainerStarted","Data":"99d458585b0a3be5985637e686ffd3e3df5dfacc86f9afbd4ab0d56d316d054f"} Nov 21 19:16:16 crc kubenswrapper[4701]: I1121 19:16:16.430273 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-gg7tq" event={"ID":"c7b87a42-0af4-4484-845e-f2993960537c","Type":"ContainerStarted","Data":"38a267fb1fdc50e361ba0608d250134caf446b2976f5b3bb79b762363c0f8c7b"} Nov 21 19:16:16 crc kubenswrapper[4701]: I1121 19:16:16.461866 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-b4c496f69-9rtf7"] Nov 21 19:16:16 crc kubenswrapper[4701]: W1121 19:16:16.467506 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod015395c6_297a_4a90_a5fd_49dcdde237af.slice/crio-f710c11fd0af089250848c0216feadada04c0a2414ad8647473c7c4df44fed01 WatchSource:0}: Error finding container f710c11fd0af089250848c0216feadada04c0a2414ad8647473c7c4df44fed01: Status 404 returned error can't find the container with id f710c11fd0af089250848c0216feadada04c0a2414ad8647473c7c4df44fed01 Nov 21 19:16:16 crc kubenswrapper[4701]: I1121 19:16:16.475494 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-58f887965d-467lr"] Nov 21 19:16:16 crc kubenswrapper[4701]: I1121 19:16:16.478528 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-7969689c84-t6hz7" event={"ID":"0c6d96e4-2798-4525-bcec-61ad137140d8","Type":"ContainerStarted","Data":"ffbed34c4489639bad12cab64f5a68223ff5ea155b2f9b89f32556a9cd3d1096"} Nov 21 19:16:16 crc kubenswrapper[4701]: I1121 19:16:16.486857 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-6d2f7"] Nov 21 19:16:16 crc kubenswrapper[4701]: I1121 19:16:16.487043 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5b797b8dff-96954"] Nov 21 
19:16:16 crc kubenswrapper[4701]: I1121 19:16:16.526266 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-kkz2m" event={"ID":"a76b2214-2c16-4b55-bf3d-c7bdf1019237","Type":"ContainerStarted","Data":"ad89fa402f03c9b64ffe6febee47e2624f0ea2ec6a8a293e79e9e75aaa048c57"} Nov 21 19:16:16 crc kubenswrapper[4701]: I1121 19:16:16.560368 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-c2mhg" event={"ID":"ef203e45-f1b1-4a9a-9987-66bb33655a95","Type":"ContainerStarted","Data":"0974fa25f2661f199288f0d53265c6299f46060366917c35659d56dfd4243a78"} Nov 21 19:16:16 crc kubenswrapper[4701]: E1121 19:16:16.585920 4701 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:4094e7fc11a33e8e2b6768a053cafaf5b122446d23f9113d43d520cb64e9776c,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-tppvh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-5b797b8dff-96954_openstack-operators(e2fc7504-afe1-4197-a366-c765c52366b0): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 21 19:16:16 crc kubenswrapper[4701]: I1121 19:16:16.803636 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-cfbb9c588-zrwsd"] Nov 21 19:16:16 crc kubenswrapper[4701]: E1121 19:16:16.809869 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with 
ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-96954" podUID="e2fc7504-afe1-4197-a366-c765c52366b0" Nov 21 19:16:16 crc kubenswrapper[4701]: I1121 19:16:16.886834 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-knf78"] Nov 21 19:16:16 crc kubenswrapper[4701]: I1121 19:16:16.902643 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-d656998f4-pwg6n"] Nov 21 19:16:16 crc kubenswrapper[4701]: I1121 19:16:16.907263 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-78bd47f458-rzvnf"] Nov 21 19:16:16 crc kubenswrapper[4701]: I1121 19:16:16.913402 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-5c984db885-xjww4"] Nov 21 19:16:16 crc kubenswrapper[4701]: E1121 19:16:16.922317 4701 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/neutron-operator@sha256:207578cb433471cc1a79c21a808c8a15489d1d3c9fa77e29f3f697c33917fec6,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jjzkc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-78bd47f458-rzvnf_openstack-operators(8c7ae04c-6e93-4c37-b1e5-8bbcbe9ffa2d): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 21 19:16:16 crc kubenswrapper[4701]: I1121 19:16:16.944145 4701 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/743fbd83-3b42-4083-b06f-ae81d6294066-cert\") pod \"openstack-operator-controller-manager-7467d8c866-fgkj9\" (UID: \"743fbd83-3b42-4083-b06f-ae81d6294066\") " pod="openstack-operators/openstack-operator-controller-manager-7467d8c866-fgkj9" Nov 21 19:16:16 crc kubenswrapper[4701]: I1121 19:16:16.951563 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/743fbd83-3b42-4083-b06f-ae81d6294066-cert\") pod \"openstack-operator-controller-manager-7467d8c866-fgkj9\" (UID: \"743fbd83-3b42-4083-b06f-ae81d6294066\") " pod="openstack-operators/openstack-operator-controller-manager-7467d8c866-fgkj9" Nov 21 19:16:16 crc kubenswrapper[4701]: I1121 19:16:16.956322 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-c58b4"] Nov 21 19:16:16 crc kubenswrapper[4701]: E1121 19:16:16.963340 4701 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:c0b5f124a37c1538042c0e63f0978429572e2a851d7f3a6eb80de09b86d755a0,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qzxkl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-d656998f4-pwg6n_openstack-operators(565f6d5b-92e9-4fc5-9c4b-9c06b8946754): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 21 19:16:16 crc kubenswrapper[4701]: I1121 19:16:16.963471 4701 kubelet.go:2428] "SyncLoop 
UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-6dd8864d7c-vbqvb"] Nov 21 19:16:16 crc kubenswrapper[4701]: E1121 19:16:16.963574 4701 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:38.102.83.164:5001/openstack-k8s-operators/watcher-operator:363c0c52523348343cb5803c42a4795431a49b71,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-7f2bg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-5c984db885-xjww4_openstack-operators(b571034c-9574-4a93-80e9-abbf663e6ac3): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 21 19:16:17 crc kubenswrapper[4701]: E1121 19:16:17.018357 4701 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/infra-operator@sha256:86df58f744c1d23233cc98f6ea17c8d6da637c50003d0fc8c100045594aa9894,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{600 -3} {} 600m DecimalSI},memory: {{2147483648 0} {} 2Gi BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{536870912 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jsnpc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod infra-operator-controller-manager-6dd8864d7c-vbqvb_openstack-operators(b15963ff-1822-4079-8cce-266b05a9ac47): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 21 19:16:17 crc kubenswrapper[4701]: W1121 19:16:17.021579 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0439f0bf_0ea9_4553_a53c_74f87b31a6a7.slice/crio-4a32426fcc9ba327c0d2e59c27e1ac7e4910874cbb7de7c39e0764b8cec4d4c5 WatchSource:0}: Error finding container 4a32426fcc9ba327c0d2e59c27e1ac7e4910874cbb7de7c39e0764b8cec4d4c5: Status 404 returned error can't find the container with id 4a32426fcc9ba327c0d2e59c27e1ac7e4910874cbb7de7c39e0764b8cec4d4c5 Nov 21 19:16:17 crc kubenswrapper[4701]: I1121 19:16:17.023551 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-7467d8c866-fgkj9" Nov 21 19:16:17 crc kubenswrapper[4701]: E1121 19:16:17.026276 4701 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:78852f8ba332a5756c1551c126157f735279101a0fc3277ba4aa4db3478789dd,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-baremetal-operator-agent:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_ANSIBLEEE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_EVALUATOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-evaluator:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_NOTIFIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-notifier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_APACHE_IMAGE_URL_DEFAULT,Value:registry.redhat.io/ubi9/httpd-24:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_KEYSTONE_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-keystone-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_IPMI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_MYSQLD_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/mysqld-exporter:v0.15.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_NOTIFICATION_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-notification:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_SGCORE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/sg-core:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_BACKUP_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-backup:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_SCHEDULER_IM
AGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_VOLUME_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-volume:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_API_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_PROC_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-processor:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_BACKENDBIND9_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-backend-bind9:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_MDNS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-mdns:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_PRODUCER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-producer:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_UNBOUND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-unbound:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_FRR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-frr:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_ISCSID_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-iscsid:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_KEPLER_IMAGE_URL_DEFAULT,Value:quay.io/sustainable_computing_io/kepler:release-0.7.12,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_LOGROTATE_CROND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cron:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_MULTIPATHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-multipathd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_DHCP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-dhcp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_METADATA_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_OVN_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-ovn-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_SRIOV_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-sriov-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NODE_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/node-exporter:v1.5.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_OVN_BGP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-bgp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_PODMAN_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/navidys/prometheus-podman-exporter:v1.10.1,ValueF
rom:nil,},EnvVar{Name:RELATED_IMAGE_GLANCE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_CFNAPI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api-cfn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HORIZON_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_MEMCACHED_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_REDIS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-redis:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_INSPECTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-inspector:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_NEUTRON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-neutron-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PXE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-pxe:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PYTHON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/ironic-python-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KEYSTONE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-keystone:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KSM_IMAGE_URL_DEFAULT,Value:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SHARE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-share:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MARIADB_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NET_UTILS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-netutils:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NEUTRON_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/pod
ified-antelope-centos9/openstack-nova-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_NOVNC_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-novncproxy:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HEALTHMANAGER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-health-manager:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HOUSEKEEPING_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-housekeeping:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_RSYSLOG_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rsyslog:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_CLIENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_MUST_GATHER_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-must-gather:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_NETWORK_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OS_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/edpm-hardened-uefi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_OVS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NORTHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-northd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_SB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PLACEMENT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_RABBITMQ_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_ACCOUNT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-account:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-container:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_OBJECT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-object:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_PROXY_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-proxy-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_TEST_TEMPEST
_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_APPLIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-applier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_DECISION_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-decision-engine:current-podified,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-nhbt4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-baremetal-operator-controller-manager-8c7444f48-c58b4_openstack-operators(0439f0bf-0ea9-4553-a53c-74f87b31a6a7): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 21 19:16:17 crc kubenswrapper[4701]: E1121 19:16:17.325893 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-rzvnf" podUID="8c7ae04c-6e93-4c37-b1e5-8bbcbe9ffa2d" Nov 21 19:16:17 crc kubenswrapper[4701]: I1121 19:16:17.459617 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7467d8c866-fgkj9"] Nov 21 19:16:17 crc kubenswrapper[4701]: I1121 19:16:17.608016 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7467d8c866-fgkj9" event={"ID":"743fbd83-3b42-4083-b06f-ae81d6294066","Type":"ContainerStarted","Data":"c7eb52a72ca43fa455de866be66fb51e71449b6483084351cffbdb9c8be3b2e5"} Nov 21 19:16:17 crc kubenswrapper[4701]: E1121 19:16:17.626711 
4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-vbqvb" podUID="b15963ff-1822-4079-8cce-266b05a9ac47" Nov 21 19:16:17 crc kubenswrapper[4701]: I1121 19:16:17.639370 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-b4c496f69-9rtf7" event={"ID":"1816b847-d41a-400a-bb1d-4f7551cfd581","Type":"ContainerStarted","Data":"f89086bc1486ebb91d9df4c255e36b69ccf055da419caca224b3a7418c577b5c"} Nov 21 19:16:17 crc kubenswrapper[4701]: I1121 19:16:17.649821 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-6d2f7" event={"ID":"66d77e65-ca72-473d-9697-9168a951b0c9","Type":"ContainerStarted","Data":"3b575f0b935ee29e1726e91d05267b6b11b95cc9bf3bf27cc3f1779856e67483"} Nov 21 19:16:17 crc kubenswrapper[4701]: I1121 19:16:17.653745 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-96954" event={"ID":"e2fc7504-afe1-4197-a366-c765c52366b0","Type":"ContainerStarted","Data":"edad9b15506cd4ccf512ad7762c7e367966ded56d99fa8b01ad8565e1b054deb"} Nov 21 19:16:17 crc kubenswrapper[4701]: I1121 19:16:17.653773 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-96954" event={"ID":"e2fc7504-afe1-4197-a366-c765c52366b0","Type":"ContainerStarted","Data":"4eee49047645f4c8d28084caccc815ad81d3c78af494e5c4f437685479975bb5"} Nov 21 19:16:17 crc kubenswrapper[4701]: E1121 19:16:17.658087 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:4094e7fc11a33e8e2b6768a053cafaf5b122446d23f9113d43d520cb64e9776c\\\"\"" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-96954" podUID="e2fc7504-afe1-4197-a366-c765c52366b0" Nov 21 19:16:17 crc kubenswrapper[4701]: E1121 19:16:17.666138 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-c58b4" podUID="0439f0bf-0ea9-4553-a53c-74f87b31a6a7" Nov 21 19:16:17 crc kubenswrapper[4701]: E1121 19:16:17.680480 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/watcher-operator-controller-manager-5c984db885-xjww4" podUID="b571034c-9574-4a93-80e9-abbf663e6ac3" Nov 21 19:16:17 crc kubenswrapper[4701]: I1121 19:16:17.680674 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-knf78" event={"ID":"82428420-1129-4ce6-a969-7d54bb2f0d52","Type":"ContainerStarted","Data":"5dd0bcd050d7f56806d35aa0879d1c9f7cc9a729bd6e152aa95fc51684d04d7d"} Nov 21 19:16:17 crc kubenswrapper[4701]: I1121 19:16:17.708118 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-hz5sk" event={"ID":"61444bc1-a24a-4c29-94b8-953ae2dc8621","Type":"ContainerStarted","Data":"e0ba34f69e41044b7b1ad7928a2866f5d1aa5410621c071519c445c0a33f3b64"} Nov 21 19:16:17 crc 
kubenswrapper[4701]: I1121 19:16:17.755659 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-zrwsd" event={"ID":"21028817-64c6-4a7c-8427-8ee3db1dec7b","Type":"ContainerStarted","Data":"c74e961c7f577e51ffef4be39d08ed587c6bb7982392c2c1f294e4c7087667fc"} Nov 21 19:16:17 crc kubenswrapper[4701]: I1121 19:16:17.801264 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-rzvnf" event={"ID":"8c7ae04c-6e93-4c37-b1e5-8bbcbe9ffa2d","Type":"ContainerStarted","Data":"4de5bb29e63006de81cb65f540f50e6882826ce0cf17a1a11738921649e88597"} Nov 21 19:16:17 crc kubenswrapper[4701]: I1121 19:16:17.801321 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-rzvnf" event={"ID":"8c7ae04c-6e93-4c37-b1e5-8bbcbe9ffa2d","Type":"ContainerStarted","Data":"fde23f1a7d809b495c75afe132021b60733e9a9dff0fca808f7cdb8c0c49b16d"} Nov 21 19:16:17 crc kubenswrapper[4701]: E1121 19:16:17.817052 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/neutron-operator@sha256:207578cb433471cc1a79c21a808c8a15489d1d3c9fa77e29f3f697c33917fec6\\\"\"" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-rzvnf" podUID="8c7ae04c-6e93-4c37-b1e5-8bbcbe9ffa2d" Nov 21 19:16:17 crc kubenswrapper[4701]: I1121 19:16:17.829197 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58f887965d-467lr" event={"ID":"bcf3ee80-4bca-445a-84aa-ef30d99b7b9a","Type":"ContainerStarted","Data":"f697516fcbce8ad8d65850b98e9e0e5058f835c6e7ec86f2715acf3bc68c4493"} Nov 21 19:16:17 crc kubenswrapper[4701]: I1121 19:16:17.876567 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d656998f4-pwg6n" event={"ID":"565f6d5b-92e9-4fc5-9c4b-9c06b8946754","Type":"ContainerStarted","Data":"f3e7c429f2208fad2935c95d0a11ee4867f660af8a331c17f7afbb585665f2ca"} Nov 21 19:16:17 crc kubenswrapper[4701]: E1121 19:16:17.885645 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/swift-operator-controller-manager-d656998f4-pwg6n" podUID="565f6d5b-92e9-4fc5-9c4b-9c06b8946754" Nov 21 19:16:17 crc kubenswrapper[4701]: I1121 19:16:17.911780 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-5c984db885-xjww4" event={"ID":"b571034c-9574-4a93-80e9-abbf663e6ac3","Type":"ContainerStarted","Data":"ab8f1d43d8ef9da75648b6a633225c025b45c5c81c7abb8218bc4abb9e473d02"} Nov 21 19:16:17 crc kubenswrapper[4701]: E1121 19:16:17.919333 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.164:5001/openstack-k8s-operators/watcher-operator:363c0c52523348343cb5803c42a4795431a49b71\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-5c984db885-xjww4" podUID="b571034c-9574-4a93-80e9-abbf663e6ac3" Nov 21 19:16:17 crc kubenswrapper[4701]: I1121 19:16:17.958934 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-c58b4" 
event={"ID":"0439f0bf-0ea9-4553-a53c-74f87b31a6a7","Type":"ContainerStarted","Data":"4a32426fcc9ba327c0d2e59c27e1ac7e4910874cbb7de7c39e0764b8cec4d4c5"} Nov 21 19:16:17 crc kubenswrapper[4701]: E1121 19:16:17.966470 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:78852f8ba332a5756c1551c126157f735279101a0fc3277ba4aa4db3478789dd\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-c58b4" podUID="0439f0bf-0ea9-4553-a53c-74f87b31a6a7" Nov 21 19:16:18 crc kubenswrapper[4701]: I1121 19:16:18.005289 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-qqmcw" event={"ID":"015395c6-297a-4a90-a5fd-49dcdde237af","Type":"ContainerStarted","Data":"f710c11fd0af089250848c0216feadada04c0a2414ad8647473c7c4df44fed01"} Nov 21 19:16:18 crc kubenswrapper[4701]: I1121 19:16:18.041661 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-vbqvb" event={"ID":"b15963ff-1822-4079-8cce-266b05a9ac47","Type":"ContainerStarted","Data":"4bab4ec881f0d41cd82ef9863a6544c33ba9c07310e6f0fd8f9959a44b685e1d"} Nov 21 19:16:18 crc kubenswrapper[4701]: E1121 19:16:18.047053 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/infra-operator@sha256:86df58f744c1d23233cc98f6ea17c8d6da637c50003d0fc8c100045594aa9894\\\"\"" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-vbqvb" podUID="b15963ff-1822-4079-8cce-266b05a9ac47" Nov 21 19:16:18 crc kubenswrapper[4701]: I1121 19:16:18.048839 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-rlq95" event={"ID":"1440b54d-d3f5-46a9-b335-27a6d2031d24","Type":"ContainerStarted","Data":"8f47cb454fdd8b71158defce87222eeb6e98afc5b812e94421daf2d58713c045"} Nov 21 19:16:19 crc kubenswrapper[4701]: I1121 19:16:19.073553 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7467d8c866-fgkj9" event={"ID":"743fbd83-3b42-4083-b06f-ae81d6294066","Type":"ContainerStarted","Data":"10dc54937e0ccf4b9e8df21b9cd7452325a2cd4ee61faaed6e7c24357a0ab08f"} Nov 21 19:16:19 crc kubenswrapper[4701]: I1121 19:16:19.073609 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7467d8c866-fgkj9" event={"ID":"743fbd83-3b42-4083-b06f-ae81d6294066","Type":"ContainerStarted","Data":"f7019970672aa117d9e2ea6272bce200f6519c9b7f274310f286e746d89ec40c"} Nov 21 19:16:19 crc kubenswrapper[4701]: I1121 19:16:19.074666 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-7467d8c866-fgkj9" Nov 21 19:16:19 crc kubenswrapper[4701]: I1121 19:16:19.105062 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-vbqvb" event={"ID":"b15963ff-1822-4079-8cce-266b05a9ac47","Type":"ContainerStarted","Data":"3dae20b49ae4512cbf69e6f64eb93a12325f98004126ddd9e34a9c273ed47a13"} Nov 21 19:16:19 crc kubenswrapper[4701]: E1121 19:16:19.107966 4701 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/infra-operator@sha256:86df58f744c1d23233cc98f6ea17c8d6da637c50003d0fc8c100045594aa9894\\\"\"" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-vbqvb" podUID="b15963ff-1822-4079-8cce-266b05a9ac47" Nov 21 19:16:19 crc kubenswrapper[4701]: I1121 19:16:19.109826 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d656998f4-pwg6n" event={"ID":"565f6d5b-92e9-4fc5-9c4b-9c06b8946754","Type":"ContainerStarted","Data":"5b637ee30fd03156477257783ab4b17a08d69d2e96d45e32f5fb03ef5c48fb86"} Nov 21 19:16:19 crc kubenswrapper[4701]: E1121 19:16:19.113138 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:c0b5f124a37c1538042c0e63f0978429572e2a851d7f3a6eb80de09b86d755a0\\\"\"" pod="openstack-operators/swift-operator-controller-manager-d656998f4-pwg6n" podUID="565f6d5b-92e9-4fc5-9c4b-9c06b8946754" Nov 21 19:16:19 crc kubenswrapper[4701]: I1121 19:16:19.113268 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-5c984db885-xjww4" event={"ID":"b571034c-9574-4a93-80e9-abbf663e6ac3","Type":"ContainerStarted","Data":"06527f40a3a585cf27b120826176dd29dbb60b7bc60975d6211b99adf3e8f547"} Nov 21 19:16:19 crc kubenswrapper[4701]: I1121 19:16:19.115661 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-7467d8c866-fgkj9" podStartSLOduration=4.115647166 podStartE2EDuration="4.115647166s" podCreationTimestamp="2025-11-21 19:16:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:16:19.114247018 +0000 UTC m=+869.899387035" watchObservedRunningTime="2025-11-21 19:16:19.115647166 +0000 UTC m=+869.900787193" Nov 21 19:16:19 crc kubenswrapper[4701]: E1121 19:16:19.117720 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.164:5001/openstack-k8s-operators/watcher-operator:363c0c52523348343cb5803c42a4795431a49b71\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-5c984db885-xjww4" podUID="b571034c-9574-4a93-80e9-abbf663e6ac3" Nov 21 19:16:19 crc kubenswrapper[4701]: I1121 19:16:19.120191 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-c58b4" event={"ID":"0439f0bf-0ea9-4553-a53c-74f87b31a6a7","Type":"ContainerStarted","Data":"c37bece0e2acd7a9143471d63b76d6cd8801fbf37889119b4fb487ec0881a3a6"} Nov 21 19:16:19 crc kubenswrapper[4701]: E1121 19:16:19.125876 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:4094e7fc11a33e8e2b6768a053cafaf5b122446d23f9113d43d520cb64e9776c\\\"\"" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-96954" podUID="e2fc7504-afe1-4197-a366-c765c52366b0" Nov 21 19:16:19 crc kubenswrapper[4701]: E1121 19:16:19.125937 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:78852f8ba332a5756c1551c126157f735279101a0fc3277ba4aa4db3478789dd\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-c58b4" podUID="0439f0bf-0ea9-4553-a53c-74f87b31a6a7" Nov 21 19:16:19 crc kubenswrapper[4701]: E1121 19:16:19.127462 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/neutron-operator@sha256:207578cb433471cc1a79c21a808c8a15489d1d3c9fa77e29f3f697c33917fec6\\\"\"" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-rzvnf" podUID="8c7ae04c-6e93-4c37-b1e5-8bbcbe9ffa2d" Nov 21 19:16:20 crc kubenswrapper[4701]: E1121 19:16:20.143793 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:78852f8ba332a5756c1551c126157f735279101a0fc3277ba4aa4db3478789dd\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-c58b4" podUID="0439f0bf-0ea9-4553-a53c-74f87b31a6a7" Nov 21 19:16:20 crc kubenswrapper[4701]: E1121 19:16:20.143857 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/infra-operator@sha256:86df58f744c1d23233cc98f6ea17c8d6da637c50003d0fc8c100045594aa9894\\\"\"" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-vbqvb" podUID="b15963ff-1822-4079-8cce-266b05a9ac47" Nov 21 19:16:20 crc kubenswrapper[4701]: E1121 19:16:20.144248 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.164:5001/openstack-k8s-operators/watcher-operator:363c0c52523348343cb5803c42a4795431a49b71\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-5c984db885-xjww4" podUID="b571034c-9574-4a93-80e9-abbf663e6ac3" Nov 21 19:16:20 crc kubenswrapper[4701]: E1121 19:16:20.144312 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:c0b5f124a37c1538042c0e63f0978429572e2a851d7f3a6eb80de09b86d755a0\\\"\"" pod="openstack-operators/swift-operator-controller-manager-d656998f4-pwg6n" podUID="565f6d5b-92e9-4fc5-9c4b-9c06b8946754" Nov 21 19:16:27 crc kubenswrapper[4701]: I1121 19:16:27.031075 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-7467d8c866-fgkj9" Nov 21 19:16:30 crc kubenswrapper[4701]: E1121 19:16:30.573235 4701 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/manila-operator@sha256:b749a5dd8bc718875c3f5e81b38d54d003be77ab92de4a3e9f9595566496a58a" Nov 21 19:16:30 crc kubenswrapper[4701]: E1121 19:16:30.573897 4701 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manager,Image:quay.io/openstack-k8s-operators/manila-operator@sha256:b749a5dd8bc718875c3f5e81b38d54d003be77ab92de4a3e9f9595566496a58a,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-4gccf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-58f887965d-467lr_openstack-operators(bcf3ee80-4bca-445a-84aa-ef30d99b7b9a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 19:16:31 crc kubenswrapper[4701]: E1121 19:16:31.188866 4701 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:3ef72bbd7cce89ff54d850ff44ca6d7b2360834a502da3d561aeb6fd3d9af50a" Nov 21 19:16:31 crc kubenswrapper[4701]: E1121 19:16:31.189656 4701 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:3ef72bbd7cce89ff54d850ff44ca6d7b2360834a502da3d561aeb6fd3d9af50a,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m 
DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-zctgj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-7454b96578-hz5sk_openstack-operators(61444bc1-a24a-4c29-94b8-953ae2dc8621): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 19:16:31 crc kubenswrapper[4701]: E1121 19:16:31.984358 4701 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/horizon-operator@sha256:848f4c43c6bdd4e33e3ce1d147a85b9b6a6124a150bd5155dce421ef539259e9" Nov 21 19:16:31 crc kubenswrapper[4701]: E1121 19:16:31.984570 4701 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/horizon-operator@sha256:848f4c43c6bdd4e33e3ce1d147a85b9b6a6124a150bd5155dce421ef539259e9,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-rvc4p,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-operator-controller-manager-598f69df5d-kkz2m_openstack-operators(a76b2214-2c16-4b55-bf3d-c7bdf1019237): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 19:16:32 crc kubenswrapper[4701]: E1121 19:16:32.667953 4701 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2" Nov 21 19:16:32 crc kubenswrapper[4701]: E1121 19:16:32.668103 4701 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-7xpjh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-5f97d8c699-knf78_openstack-operators(82428420-1129-4ce6-a969-7d54bb2f0d52): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 19:16:32 crc kubenswrapper[4701]: E1121 19:16:32.669643 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-knf78" podUID="82428420-1129-4ce6-a969-7d54bb2f0d52" Nov 21 19:16:32 crc kubenswrapper[4701]: E1121 19:16:32.746649 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-knf78" podUID="82428420-1129-4ce6-a969-7d54bb2f0d52" Nov 21 19:16:33 crc kubenswrapper[4701]: E1121 19:16:33.207188 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-kkz2m" podUID="a76b2214-2c16-4b55-bf3d-c7bdf1019237" Nov 21 19:16:33 crc kubenswrapper[4701]: E1121 19:16:33.208722 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/manila-operator-controller-manager-58f887965d-467lr" podUID="bcf3ee80-4bca-445a-84aa-ef30d99b7b9a" Nov 21 19:16:33 crc kubenswrapper[4701]: E1121 19:16:33.224768 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-hz5sk" podUID="61444bc1-a24a-4c29-94b8-953ae2dc8621" Nov 21 19:16:33 crc kubenswrapper[4701]: I1121 19:16:33.755080 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-qmdtp" event={"ID":"87969819-3a91-4333-9585-72a2a27fa6c9","Type":"ContainerStarted","Data":"bbe0298a8b5dbe996d982a1aed5e917158ffaa12075d2b61e0643a0bdb2db189"} Nov 21 19:16:33 crc kubenswrapper[4701]: I1121 19:16:33.765102 4701 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-zrwsd" event={"ID":"21028817-64c6-4a7c-8427-8ee3db1dec7b","Type":"ContainerStarted","Data":"8333ad79bbc60ff6358927b2862e1b84cdaf5ee9a16f14e7bbf752c72c80b83c"} Nov 21 19:16:33 crc kubenswrapper[4701]: I1121 19:16:33.768882 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-vmsdw" event={"ID":"9a58290e-d37e-4094-8ed8-4ed701c1292c","Type":"ContainerStarted","Data":"852c7909b946aac38a517773ec7267176d9069fc0c88de215705c8bdcfb4dcfa"} Nov 21 19:16:33 crc kubenswrapper[4701]: I1121 19:16:33.770092 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-c2mhg" event={"ID":"ef203e45-f1b1-4a9a-9987-66bb33655a95","Type":"ContainerStarted","Data":"e625b0baaefe4a4fbf30d7bd06ec5c7e8799e2667cbb746c387ef11c65af9f64"} Nov 21 19:16:33 crc kubenswrapper[4701]: I1121 19:16:33.771598 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-2ccc7" event={"ID":"566d8e82-b230-492d-a47b-80d2351b169e","Type":"ContainerStarted","Data":"8e091cfef7753abea50809ab395d3991070c0c8a2adb5b0d375d11e1fc2e78db"} Nov 21 19:16:33 crc kubenswrapper[4701]: I1121 19:16:33.776875 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-hz5sk" event={"ID":"61444bc1-a24a-4c29-94b8-953ae2dc8621","Type":"ContainerStarted","Data":"8402fdc59ee135641c92ae3d9012097b2b1032679ef8a94c978ef40af50fc706"} Nov 21 19:16:33 crc kubenswrapper[4701]: E1121 19:16:33.781405 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:3ef72bbd7cce89ff54d850ff44ca6d7b2360834a502da3d561aeb6fd3d9af50a\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-hz5sk" podUID="61444bc1-a24a-4c29-94b8-953ae2dc8621" Nov 21 19:16:33 crc kubenswrapper[4701]: I1121 19:16:33.787108 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-rlq95" event={"ID":"1440b54d-d3f5-46a9-b335-27a6d2031d24","Type":"ContainerStarted","Data":"ca0040062ccb68934beaa309ec2a50c4d0cf7013d1af7db31ff0010a0c33478d"} Nov 21 19:16:33 crc kubenswrapper[4701]: I1121 19:16:33.788505 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-kkz2m" event={"ID":"a76b2214-2c16-4b55-bf3d-c7bdf1019237","Type":"ContainerStarted","Data":"9afe76bc99d5ad29e1f440d2a35c58ce6adf8f0c210adc1afd329fd2d905bcc9"} Nov 21 19:16:33 crc kubenswrapper[4701]: E1121 19:16:33.790963 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/horizon-operator@sha256:848f4c43c6bdd4e33e3ce1d147a85b9b6a6124a150bd5155dce421ef539259e9\\\"\"" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-kkz2m" podUID="a76b2214-2c16-4b55-bf3d-c7bdf1019237" Nov 21 19:16:33 crc kubenswrapper[4701]: I1121 19:16:33.791656 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58f887965d-467lr" 
event={"ID":"bcf3ee80-4bca-445a-84aa-ef30d99b7b9a","Type":"ContainerStarted","Data":"5254aee5a779adeeee5b31c9d458047f98641ae9053b223c3a482b8e0a12eb92"} Nov 21 19:16:33 crc kubenswrapper[4701]: E1121 19:16:33.792516 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:b749a5dd8bc718875c3f5e81b38d54d003be77ab92de4a3e9f9595566496a58a\\\"\"" pod="openstack-operators/manila-operator-controller-manager-58f887965d-467lr" podUID="bcf3ee80-4bca-445a-84aa-ef30d99b7b9a" Nov 21 19:16:34 crc kubenswrapper[4701]: E1121 19:16:34.801468 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:3ef72bbd7cce89ff54d850ff44ca6d7b2360834a502da3d561aeb6fd3d9af50a\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-hz5sk" podUID="61444bc1-a24a-4c29-94b8-953ae2dc8621" Nov 21 19:16:34 crc kubenswrapper[4701]: E1121 19:16:34.802326 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/horizon-operator@sha256:848f4c43c6bdd4e33e3ce1d147a85b9b6a6124a150bd5155dce421ef539259e9\\\"\"" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-kkz2m" podUID="a76b2214-2c16-4b55-bf3d-c7bdf1019237" Nov 21 19:16:34 crc kubenswrapper[4701]: E1121 19:16:34.803644 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:b749a5dd8bc718875c3f5e81b38d54d003be77ab92de4a3e9f9595566496a58a\\\"\"" pod="openstack-operators/manila-operator-controller-manager-58f887965d-467lr" podUID="bcf3ee80-4bca-445a-84aa-ef30d99b7b9a" Nov 21 19:16:35 crc kubenswrapper[4701]: I1121 19:16:35.816021 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-b4c496f69-9rtf7" event={"ID":"1816b847-d41a-400a-bb1d-4f7551cfd581","Type":"ContainerStarted","Data":"071e12153f7023eb4f7eb4a886f0fa560ace475551aecd332c6617f05b00fbae"} Nov 21 19:16:35 crc kubenswrapper[4701]: I1121 19:16:35.825372 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-qqmcw" event={"ID":"015395c6-297a-4a90-a5fd-49dcdde237af","Type":"ContainerStarted","Data":"87500e0e2846ce2ce5d1075f2ab4ab219df8a6d9e09abf33e6a841d2d3c2af7d"} Nov 21 19:16:36 crc kubenswrapper[4701]: I1121 19:16:36.835983 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-lgvh6" event={"ID":"4c5eabdd-f4f8-4180-be28-707592f6d24d","Type":"ContainerStarted","Data":"4032edb9f80c8fe2f8ff0b3eb2ccfa60947e7bd83be564da70395844e3d40c59"} Nov 21 19:16:38 crc kubenswrapper[4701]: I1121 19:16:38.870498 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-gg7tq" event={"ID":"c7b87a42-0af4-4484-845e-f2993960537c","Type":"ContainerStarted","Data":"a69f484b4ffa292d4be2dce4549b91eda892c0ef28374ea326fd51bdf5fffdfb"} Nov 21 19:16:38 crc kubenswrapper[4701]: I1121 19:16:38.875530 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-6d2f7" event={"ID":"66d77e65-ca72-473d-9697-9168a951b0c9","Type":"ContainerStarted","Data":"69788f8e7e9d8d1e15cf7f77daaa6426f12fff513a88442f116dcf7fd670c65c"} Nov 21 19:16:38 crc kubenswrapper[4701]: I1121 19:16:38.894585 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-7969689c84-t6hz7" event={"ID":"0c6d96e4-2798-4525-bcec-61ad137140d8","Type":"ContainerStarted","Data":"8e09ea767e61604ac2cd555d74a418dec06acf7c1f2726b2cf7755ae80267512"} Nov 21 19:16:38 crc kubenswrapper[4701]: I1121 19:16:38.897484 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-qmdtp" event={"ID":"87969819-3a91-4333-9585-72a2a27fa6c9","Type":"ContainerStarted","Data":"b2edf1414a86c59acca93e0c9c1751779676c07256d60b66c9c01d6e5d96feb4"} Nov 21 19:16:38 crc kubenswrapper[4701]: I1121 19:16:38.898953 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-qmdtp" Nov 21 19:16:38 crc kubenswrapper[4701]: I1121 19:16:38.902587 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-qmdtp" Nov 21 19:16:38 crc kubenswrapper[4701]: I1121 19:16:38.931983 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-qmdtp" podStartSLOduration=8.311785442 podStartE2EDuration="24.931962968s" podCreationTimestamp="2025-11-21 19:16:14 +0000 UTC" firstStartedPulling="2025-11-21 19:16:16.13142066 +0000 UTC m=+866.916560677" lastFinishedPulling="2025-11-21 19:16:32.751598176 +0000 UTC m=+883.536738203" observedRunningTime="2025-11-21 19:16:38.923236445 +0000 UTC m=+889.708376482" watchObservedRunningTime="2025-11-21 19:16:38.931962968 +0000 UTC m=+889.717102995" Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.908232 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-rzvnf" event={"ID":"8c7ae04c-6e93-4c37-b1e5-8bbcbe9ffa2d","Type":"ContainerStarted","Data":"4b748b739912da30a3754752910ae8175e18abc977788f459e2b4a2293c8a78a"} Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.908738 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-rzvnf" Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.910441 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-vbqvb" event={"ID":"b15963ff-1822-4079-8cce-266b05a9ac47","Type":"ContainerStarted","Data":"8190e6dc879ffe2fdfd068428da31ec419c2d0e9ff93c56094d2de33ab8796ee"} Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.910648 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-vbqvb" Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.912912 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-b4c496f69-9rtf7" event={"ID":"1816b847-d41a-400a-bb1d-4f7551cfd581","Type":"ContainerStarted","Data":"b820ecef9c950ba16d7a9d3abeb4f7c3ed915c5cf367366aced7e5a6ebed4c64"} Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.913012 4701 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-b4c496f69-9rtf7" Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.915084 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-6d2f7" event={"ID":"66d77e65-ca72-473d-9697-9168a951b0c9","Type":"ContainerStarted","Data":"6c5f1435bb30fb7d505d5ab05a90a5ca35cd37821dbd3e391f022aeed5887ff7"} Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.915168 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-6d2f7" Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.917359 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-2ccc7" event={"ID":"566d8e82-b230-492d-a47b-80d2351b169e","Type":"ContainerStarted","Data":"c36a1d6883267b540f9b4e1198f9b64de088916a897ad2fa3731f33891943f19"} Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.917567 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-2ccc7" Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.919737 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-zrwsd" event={"ID":"21028817-64c6-4a7c-8427-8ee3db1dec7b","Type":"ContainerStarted","Data":"885425ff57b98d4dd50a32d843544246d49fa71afed2655f21586994a9d4ffee"} Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.919964 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-2ccc7" Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.919993 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-zrwsd" Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.921730 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-vmsdw" event={"ID":"9a58290e-d37e-4094-8ed8-4ed701c1292c","Type":"ContainerStarted","Data":"f9c784769da7c00d35d7d418c24503847b24d3ff37e3ba5fdddfc73b9f060b11"} Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.923539 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-vmsdw" Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.923719 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-zrwsd" Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.925245 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-7969689c84-t6hz7" event={"ID":"0c6d96e4-2798-4525-bcec-61ad137140d8","Type":"ContainerStarted","Data":"6888c66523e02985340cb73235dc14ce06b85d56230dea3b638c99c026f7e3ad"} Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.925676 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-7969689c84-t6hz7" Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.926643 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-vmsdw" Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.928276 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-qqmcw" event={"ID":"015395c6-297a-4a90-a5fd-49dcdde237af","Type":"ContainerStarted","Data":"cf55822ec20d3639d2df92aa6829872ceec8fdce52f61b03ea5dee258bc24bf3"} Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.928391 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-qqmcw" Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.930183 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-lgvh6" event={"ID":"4c5eabdd-f4f8-4180-be28-707592f6d24d","Type":"ContainerStarted","Data":"093f810759b37c495683a0cf078d039f2ca7f0b82a16952c82a00c86e5ba24b4"} Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.930289 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-lgvh6" Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.932324 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-gg7tq" event={"ID":"c7b87a42-0af4-4484-845e-f2993960537c","Type":"ContainerStarted","Data":"04fc31064ad949ccec863373912270493a07814c413b2dc585b30f0578780545"} Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.932385 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-gg7tq" Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.934478 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-96954" event={"ID":"e2fc7504-afe1-4197-a366-c765c52366b0","Type":"ContainerStarted","Data":"29cabf5a31005acb24b094e225767e3382cc8195cabddd7feb5b688a73108d8d"} Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.934692 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-96954" Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.936337 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-c58b4" event={"ID":"0439f0bf-0ea9-4553-a53c-74f87b31a6a7","Type":"ContainerStarted","Data":"5d1898016bc4656e378f7878abe8b5db0ab14dfbdb628d8a11f8453c3c1246a1"} Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.936486 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-c58b4" Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.938789 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d656998f4-pwg6n" event={"ID":"565f6d5b-92e9-4fc5-9c4b-9c06b8946754","Type":"ContainerStarted","Data":"841c7914fb99deefd75c769c874b336a13653d876ec21a9620b9708598c6e222"} Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.939020 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-d656998f4-pwg6n" Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.941435 4701 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack-operators/watcher-operator-controller-manager-5c984db885-xjww4" event={"ID":"b571034c-9574-4a93-80e9-abbf663e6ac3","Type":"ContainerStarted","Data":"3d11a2754c7492d8d2f9bf5db1bbf28528cfe0c1f6f671f22215fd176b256976"} Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.941618 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-5c984db885-xjww4" Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.943590 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-rlq95" event={"ID":"1440b54d-d3f5-46a9-b335-27a6d2031d24","Type":"ContainerStarted","Data":"db0e827a13053f07be03bc4b3b25f05fa80741f7facc484713f29e1f2bbc5483"} Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.943853 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-rlq95" Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.948988 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-c2mhg" event={"ID":"ef203e45-f1b1-4a9a-9987-66bb33655a95","Type":"ContainerStarted","Data":"2a7042c933bfaa63f36dc248bbe2e2c5b287ceeeaa8e215590259f635b19d4d9"} Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.949028 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-c2mhg" Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.949717 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-rzvnf" podStartSLOduration=4.289567561 podStartE2EDuration="25.949698553s" podCreationTimestamp="2025-11-21 19:16:14 +0000 UTC" firstStartedPulling="2025-11-21 19:16:16.92214689 +0000 UTC m=+867.707286917" lastFinishedPulling="2025-11-21 19:16:38.582277862 +0000 UTC m=+889.367417909" observedRunningTime="2025-11-21 19:16:39.947698649 +0000 UTC m=+890.732838686" watchObservedRunningTime="2025-11-21 19:16:39.949698553 +0000 UTC m=+890.734838580" Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.950004 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-rlq95" Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.960547 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-c2mhg" Nov 21 19:16:39 crc kubenswrapper[4701]: I1121 19:16:39.977447 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-rlq95" podStartSLOduration=9.661892562 podStartE2EDuration="25.977419265s" podCreationTimestamp="2025-11-21 19:16:14 +0000 UTC" firstStartedPulling="2025-11-21 19:16:16.349515346 +0000 UTC m=+867.134655363" lastFinishedPulling="2025-11-21 19:16:32.665042039 +0000 UTC m=+883.450182066" observedRunningTime="2025-11-21 19:16:39.971882266 +0000 UTC m=+890.757022293" watchObservedRunningTime="2025-11-21 19:16:39.977419265 +0000 UTC m=+890.762559292" Nov 21 19:16:40 crc kubenswrapper[4701]: I1121 19:16:40.043714 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-gg7tq" 
podStartSLOduration=9.355526562 podStartE2EDuration="26.043679847s" podCreationTimestamp="2025-11-21 19:16:14 +0000 UTC" firstStartedPulling="2025-11-21 19:16:16.058481548 +0000 UTC m=+866.843621575" lastFinishedPulling="2025-11-21 19:16:32.746634833 +0000 UTC m=+883.531774860" observedRunningTime="2025-11-21 19:16:40.042909927 +0000 UTC m=+890.828049954" watchObservedRunningTime="2025-11-21 19:16:40.043679847 +0000 UTC m=+890.828819874" Nov 21 19:16:40 crc kubenswrapper[4701]: I1121 19:16:40.045469 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-5c984db885-xjww4" podStartSLOduration=4.26002194 podStartE2EDuration="26.045463465s" podCreationTimestamp="2025-11-21 19:16:14 +0000 UTC" firstStartedPulling="2025-11-21 19:16:16.963387403 +0000 UTC m=+867.748527430" lastFinishedPulling="2025-11-21 19:16:38.748828918 +0000 UTC m=+889.533968955" observedRunningTime="2025-11-21 19:16:40.011337972 +0000 UTC m=+890.796477999" watchObservedRunningTime="2025-11-21 19:16:40.045463465 +0000 UTC m=+890.830603492" Nov 21 19:16:40 crc kubenswrapper[4701]: I1121 19:16:40.109349 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-6d2f7" podStartSLOduration=9.941122502 podStartE2EDuration="26.109328424s" podCreationTimestamp="2025-11-21 19:16:14 +0000 UTC" firstStartedPulling="2025-11-21 19:16:16.567785347 +0000 UTC m=+867.352925374" lastFinishedPulling="2025-11-21 19:16:32.735991269 +0000 UTC m=+883.521131296" observedRunningTime="2025-11-21 19:16:40.073975888 +0000 UTC m=+890.859115915" watchObservedRunningTime="2025-11-21 19:16:40.109328424 +0000 UTC m=+890.894468451" Nov 21 19:16:40 crc kubenswrapper[4701]: I1121 19:16:40.109849 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-vbqvb" podStartSLOduration=4.544559014 podStartE2EDuration="26.109844048s" podCreationTimestamp="2025-11-21 19:16:14 +0000 UTC" firstStartedPulling="2025-11-21 19:16:17.01821328 +0000 UTC m=+867.803353297" lastFinishedPulling="2025-11-21 19:16:38.583498294 +0000 UTC m=+889.368638331" observedRunningTime="2025-11-21 19:16:40.104446603 +0000 UTC m=+890.889586630" watchObservedRunningTime="2025-11-21 19:16:40.109844048 +0000 UTC m=+890.894984075" Nov 21 19:16:40 crc kubenswrapper[4701]: I1121 19:16:40.144171 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-lgvh6" podStartSLOduration=9.057075196 podStartE2EDuration="26.144147666s" podCreationTimestamp="2025-11-21 19:16:14 +0000 UTC" firstStartedPulling="2025-11-21 19:16:15.62955427 +0000 UTC m=+866.414694297" lastFinishedPulling="2025-11-21 19:16:32.71662674 +0000 UTC m=+883.501766767" observedRunningTime="2025-11-21 19:16:40.140480558 +0000 UTC m=+890.925620585" watchObservedRunningTime="2025-11-21 19:16:40.144147666 +0000 UTC m=+890.929287693" Nov 21 19:16:40 crc kubenswrapper[4701]: I1121 19:16:40.190592 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-b4c496f69-9rtf7" podStartSLOduration=10.032295333 podStartE2EDuration="26.190562318s" podCreationTimestamp="2025-11-21 19:16:14 +0000 UTC" firstStartedPulling="2025-11-21 19:16:16.568125946 +0000 UTC m=+867.353265973" lastFinishedPulling="2025-11-21 19:16:32.726392931 +0000 UTC 
m=+883.511532958" observedRunningTime="2025-11-21 19:16:40.18464228 +0000 UTC m=+890.969782307" watchObservedRunningTime="2025-11-21 19:16:40.190562318 +0000 UTC m=+890.975702345" Nov 21 19:16:40 crc kubenswrapper[4701]: I1121 19:16:40.213548 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-2ccc7" podStartSLOduration=9.182557593 podStartE2EDuration="26.213523112s" podCreationTimestamp="2025-11-21 19:16:14 +0000 UTC" firstStartedPulling="2025-11-21 19:16:15.621006151 +0000 UTC m=+866.406146178" lastFinishedPulling="2025-11-21 19:16:32.65197167 +0000 UTC m=+883.437111697" observedRunningTime="2025-11-21 19:16:40.210304446 +0000 UTC m=+890.995444473" watchObservedRunningTime="2025-11-21 19:16:40.213523112 +0000 UTC m=+890.998663139" Nov 21 19:16:40 crc kubenswrapper[4701]: I1121 19:16:40.258501 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-vmsdw" podStartSLOduration=9.87823706 podStartE2EDuration="26.258475445s" podCreationTimestamp="2025-11-21 19:16:14 +0000 UTC" firstStartedPulling="2025-11-21 19:16:16.271738345 +0000 UTC m=+867.056878372" lastFinishedPulling="2025-11-21 19:16:32.65197673 +0000 UTC m=+883.437116757" observedRunningTime="2025-11-21 19:16:40.235401628 +0000 UTC m=+891.020541655" watchObservedRunningTime="2025-11-21 19:16:40.258475445 +0000 UTC m=+891.043615472" Nov 21 19:16:40 crc kubenswrapper[4701]: I1121 19:16:40.297804 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-c58b4" podStartSLOduration=4.574589778 podStartE2EDuration="26.297784067s" podCreationTimestamp="2025-11-21 19:16:14 +0000 UTC" firstStartedPulling="2025-11-21 19:16:17.025639249 +0000 UTC m=+867.810779276" lastFinishedPulling="2025-11-21 19:16:38.748833538 +0000 UTC m=+889.533973565" observedRunningTime="2025-11-21 19:16:40.294248773 +0000 UTC m=+891.079388800" watchObservedRunningTime="2025-11-21 19:16:40.297784067 +0000 UTC m=+891.082924084" Nov 21 19:16:40 crc kubenswrapper[4701]: I1121 19:16:40.347150 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-zrwsd" podStartSLOduration=10.45256327 podStartE2EDuration="26.347133058s" podCreationTimestamp="2025-11-21 19:16:14 +0000 UTC" firstStartedPulling="2025-11-21 19:16:16.854590192 +0000 UTC m=+867.639730219" lastFinishedPulling="2025-11-21 19:16:32.74915998 +0000 UTC m=+883.534300007" observedRunningTime="2025-11-21 19:16:40.343829049 +0000 UTC m=+891.128969076" watchObservedRunningTime="2025-11-21 19:16:40.347133058 +0000 UTC m=+891.132273085" Nov 21 19:16:40 crc kubenswrapper[4701]: I1121 19:16:40.368530 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-7969689c84-t6hz7" podStartSLOduration=9.87231479 podStartE2EDuration="26.368506239s" podCreationTimestamp="2025-11-21 19:16:14 +0000 UTC" firstStartedPulling="2025-11-21 19:16:16.287533657 +0000 UTC m=+867.072673684" lastFinishedPulling="2025-11-21 19:16:32.783725106 +0000 UTC m=+883.568865133" observedRunningTime="2025-11-21 19:16:40.365875879 +0000 UTC m=+891.151015906" watchObservedRunningTime="2025-11-21 19:16:40.368506239 +0000 UTC m=+891.153646266" Nov 21 19:16:40 crc kubenswrapper[4701]: I1121 19:16:40.406484 4701 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-qqmcw" podStartSLOduration=10.15963136 podStartE2EDuration="26.406448895s" podCreationTimestamp="2025-11-21 19:16:14 +0000 UTC" firstStartedPulling="2025-11-21 19:16:16.501683238 +0000 UTC m=+867.286823255" lastFinishedPulling="2025-11-21 19:16:32.748500763 +0000 UTC m=+883.533640790" observedRunningTime="2025-11-21 19:16:40.395481561 +0000 UTC m=+891.180621588" watchObservedRunningTime="2025-11-21 19:16:40.406448895 +0000 UTC m=+891.191588922" Nov 21 19:16:40 crc kubenswrapper[4701]: I1121 19:16:40.482099 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-d656998f4-pwg6n" podStartSLOduration=4.695196915 podStartE2EDuration="26.482068259s" podCreationTimestamp="2025-11-21 19:16:14 +0000 UTC" firstStartedPulling="2025-11-21 19:16:16.963163337 +0000 UTC m=+867.748303364" lastFinishedPulling="2025-11-21 19:16:38.750034681 +0000 UTC m=+889.535174708" observedRunningTime="2025-11-21 19:16:40.474539437 +0000 UTC m=+891.259679474" watchObservedRunningTime="2025-11-21 19:16:40.482068259 +0000 UTC m=+891.267208276" Nov 21 19:16:40 crc kubenswrapper[4701]: I1121 19:16:40.482990 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-96954" podStartSLOduration=4.3597848599999995 podStartE2EDuration="26.482982373s" podCreationTimestamp="2025-11-21 19:16:14 +0000 UTC" firstStartedPulling="2025-11-21 19:16:16.585722277 +0000 UTC m=+867.370862304" lastFinishedPulling="2025-11-21 19:16:38.70891977 +0000 UTC m=+889.494059817" observedRunningTime="2025-11-21 19:16:40.438665817 +0000 UTC m=+891.223805844" watchObservedRunningTime="2025-11-21 19:16:40.482982373 +0000 UTC m=+891.268122400" Nov 21 19:16:40 crc kubenswrapper[4701]: I1121 19:16:40.964372 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-qqmcw" Nov 21 19:16:40 crc kubenswrapper[4701]: I1121 19:16:40.965382 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-b4c496f69-9rtf7" Nov 21 19:16:41 crc kubenswrapper[4701]: I1121 19:16:41.006160 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-c2mhg" podStartSLOduration=10.389540252 podStartE2EDuration="27.006134262s" podCreationTimestamp="2025-11-21 19:16:14 +0000 UTC" firstStartedPulling="2025-11-21 19:16:16.129399996 +0000 UTC m=+866.914540013" lastFinishedPulling="2025-11-21 19:16:32.745993996 +0000 UTC m=+883.531134023" observedRunningTime="2025-11-21 19:16:40.541573991 +0000 UTC m=+891.326714018" watchObservedRunningTime="2025-11-21 19:16:41.006134262 +0000 UTC m=+891.791274289" Nov 21 19:16:44 crc kubenswrapper[4701]: I1121 19:16:44.678021 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-lgvh6" Nov 21 19:16:44 crc kubenswrapper[4701]: I1121 19:16:44.781967 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-7969689c84-t6hz7" Nov 21 19:16:44 crc kubenswrapper[4701]: I1121 19:16:44.869378 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-gg7tq" Nov 21 19:16:45 crc kubenswrapper[4701]: I1121 19:16:45.116589 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-rzvnf" Nov 21 19:16:45 crc kubenswrapper[4701]: I1121 19:16:45.282150 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-96954" Nov 21 19:16:45 crc kubenswrapper[4701]: I1121 19:16:45.318766 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-d656998f4-pwg6n" Nov 21 19:16:45 crc kubenswrapper[4701]: I1121 19:16:45.352064 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-6d2f7" Nov 21 19:16:45 crc kubenswrapper[4701]: I1121 19:16:45.445929 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-vbqvb" Nov 21 19:16:45 crc kubenswrapper[4701]: I1121 19:16:45.476873 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-5c984db885-xjww4" Nov 21 19:16:45 crc kubenswrapper[4701]: I1121 19:16:45.852008 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-c58b4" Nov 21 19:16:48 crc kubenswrapper[4701]: I1121 19:16:48.613552 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 19:16:48 crc kubenswrapper[4701]: I1121 19:16:48.613897 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 19:16:55 crc kubenswrapper[4701]: I1121 19:16:55.117832 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-hz5sk" event={"ID":"61444bc1-a24a-4c29-94b8-953ae2dc8621","Type":"ContainerStarted","Data":"afba0842b0e80f029fa83b1b27e670bb8cbb4ea2572fc8e7840c3938240bc0fd"} Nov 21 19:16:55 crc kubenswrapper[4701]: I1121 19:16:55.118939 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-hz5sk" Nov 21 19:16:55 crc kubenswrapper[4701]: I1121 19:16:55.124971 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-kkz2m" event={"ID":"a76b2214-2c16-4b55-bf3d-c7bdf1019237","Type":"ContainerStarted","Data":"31a1727b95c23768bd6278ce387e1b9c1d0705a991d31573ed28f27185e760ba"} Nov 21 19:16:55 crc kubenswrapper[4701]: I1121 19:16:55.125563 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-kkz2m" Nov 21 19:16:55 crc kubenswrapper[4701]: I1121 19:16:55.128421 4701 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58f887965d-467lr" event={"ID":"bcf3ee80-4bca-445a-84aa-ef30d99b7b9a","Type":"ContainerStarted","Data":"dfe836febdf5aaef9143bb1e857ac20a149ad9af853fb56c29dfcc53c355e09f"} Nov 21 19:16:55 crc kubenswrapper[4701]: I1121 19:16:55.128608 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-58f887965d-467lr" Nov 21 19:16:55 crc kubenswrapper[4701]: I1121 19:16:55.142212 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-hz5sk" podStartSLOduration=2.8368260469999997 podStartE2EDuration="41.142182295s" podCreationTimestamp="2025-11-21 19:16:14 +0000 UTC" firstStartedPulling="2025-11-21 19:16:16.388330855 +0000 UTC m=+867.173470882" lastFinishedPulling="2025-11-21 19:16:54.693687093 +0000 UTC m=+905.478827130" observedRunningTime="2025-11-21 19:16:55.141422695 +0000 UTC m=+905.926562712" watchObservedRunningTime="2025-11-21 19:16:55.142182295 +0000 UTC m=+905.927322322" Nov 21 19:16:55 crc kubenswrapper[4701]: I1121 19:16:55.158287 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-kkz2m" podStartSLOduration=2.54846283 podStartE2EDuration="41.158266105s" podCreationTimestamp="2025-11-21 19:16:14 +0000 UTC" firstStartedPulling="2025-11-21 19:16:16.083890828 +0000 UTC m=+866.869030855" lastFinishedPulling="2025-11-21 19:16:54.693694093 +0000 UTC m=+905.478834130" observedRunningTime="2025-11-21 19:16:55.15732765 +0000 UTC m=+905.942467677" watchObservedRunningTime="2025-11-21 19:16:55.158266105 +0000 UTC m=+905.943406142" Nov 21 19:16:55 crc kubenswrapper[4701]: I1121 19:16:55.179123 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-58f887965d-467lr" podStartSLOduration=3.027188501 podStartE2EDuration="41.179103943s" podCreationTimestamp="2025-11-21 19:16:14 +0000 UTC" firstStartedPulling="2025-11-21 19:16:16.541948936 +0000 UTC m=+867.327088963" lastFinishedPulling="2025-11-21 19:16:54.693864348 +0000 UTC m=+905.479004405" observedRunningTime="2025-11-21 19:16:55.174620833 +0000 UTC m=+905.959760860" watchObservedRunningTime="2025-11-21 19:16:55.179103943 +0000 UTC m=+905.964243970" Nov 21 19:16:56 crc kubenswrapper[4701]: I1121 19:16:56.140749 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-knf78" event={"ID":"82428420-1129-4ce6-a969-7d54bb2f0d52","Type":"ContainerStarted","Data":"54ebf5aaae8c7cb90379df342550bf13379937be75fef14ec7bb9d6be05d0bc6"} Nov 21 19:16:56 crc kubenswrapper[4701]: I1121 19:16:56.178382 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-knf78" podStartSLOduration=3.400415886 podStartE2EDuration="41.178349182s" podCreationTimestamp="2025-11-21 19:16:15 +0000 UTC" firstStartedPulling="2025-11-21 19:16:16.921485141 +0000 UTC m=+867.706625168" lastFinishedPulling="2025-11-21 19:16:54.699418397 +0000 UTC m=+905.484558464" observedRunningTime="2025-11-21 19:16:56.174959571 +0000 UTC m=+906.960099638" watchObservedRunningTime="2025-11-21 19:16:56.178349182 +0000 UTC m=+906.963489249" Nov 21 19:17:04 crc kubenswrapper[4701]: I1121 19:17:04.832773 4701 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-kkz2m" Nov 21 19:17:04 crc kubenswrapper[4701]: I1121 19:17:04.947135 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-hz5sk" Nov 21 19:17:05 crc kubenswrapper[4701]: I1121 19:17:05.050895 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-58f887965d-467lr" Nov 21 19:17:18 crc kubenswrapper[4701]: I1121 19:17:18.613865 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 19:17:18 crc kubenswrapper[4701]: I1121 19:17:18.614908 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 19:17:25 crc kubenswrapper[4701]: I1121 19:17:25.459682 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5bd759bbbf-h4thw"] Nov 21 19:17:25 crc kubenswrapper[4701]: I1121 19:17:25.462438 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bd759bbbf-h4thw" Nov 21 19:17:25 crc kubenswrapper[4701]: I1121 19:17:25.464797 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 21 19:17:25 crc kubenswrapper[4701]: I1121 19:17:25.465164 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 21 19:17:25 crc kubenswrapper[4701]: I1121 19:17:25.465245 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 21 19:17:25 crc kubenswrapper[4701]: I1121 19:17:25.465325 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-7zggs" Nov 21 19:17:25 crc kubenswrapper[4701]: I1121 19:17:25.475509 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5bd759bbbf-h4thw"] Nov 21 19:17:25 crc kubenswrapper[4701]: I1121 19:17:25.536748 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-866784dbf-w5cpz"] Nov 21 19:17:25 crc kubenswrapper[4701]: I1121 19:17:25.538092 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-866784dbf-w5cpz" Nov 21 19:17:25 crc kubenswrapper[4701]: I1121 19:17:25.540155 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 21 19:17:25 crc kubenswrapper[4701]: I1121 19:17:25.547920 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-866784dbf-w5cpz"] Nov 21 19:17:25 crc kubenswrapper[4701]: I1121 19:17:25.642652 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-527w4\" (UniqueName: \"kubernetes.io/projected/272c7d8c-0292-4222-882d-c0c674c604b8-kube-api-access-527w4\") pod \"dnsmasq-dns-5bd759bbbf-h4thw\" (UID: \"272c7d8c-0292-4222-882d-c0c674c604b8\") " pod="openstack/dnsmasq-dns-5bd759bbbf-h4thw" Nov 21 19:17:25 crc kubenswrapper[4701]: I1121 19:17:25.642731 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e387a880-c597-4dfc-9c86-843b719a31b4-dns-svc\") pod \"dnsmasq-dns-866784dbf-w5cpz\" (UID: \"e387a880-c597-4dfc-9c86-843b719a31b4\") " pod="openstack/dnsmasq-dns-866784dbf-w5cpz" Nov 21 19:17:25 crc kubenswrapper[4701]: I1121 19:17:25.642795 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bhvr9\" (UniqueName: \"kubernetes.io/projected/e387a880-c597-4dfc-9c86-843b719a31b4-kube-api-access-bhvr9\") pod \"dnsmasq-dns-866784dbf-w5cpz\" (UID: \"e387a880-c597-4dfc-9c86-843b719a31b4\") " pod="openstack/dnsmasq-dns-866784dbf-w5cpz" Nov 21 19:17:25 crc kubenswrapper[4701]: I1121 19:17:25.642841 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/272c7d8c-0292-4222-882d-c0c674c604b8-config\") pod \"dnsmasq-dns-5bd759bbbf-h4thw\" (UID: \"272c7d8c-0292-4222-882d-c0c674c604b8\") " pod="openstack/dnsmasq-dns-5bd759bbbf-h4thw" Nov 21 19:17:25 crc kubenswrapper[4701]: I1121 19:17:25.642878 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e387a880-c597-4dfc-9c86-843b719a31b4-config\") pod \"dnsmasq-dns-866784dbf-w5cpz\" (UID: \"e387a880-c597-4dfc-9c86-843b719a31b4\") " pod="openstack/dnsmasq-dns-866784dbf-w5cpz" Nov 21 19:17:25 crc kubenswrapper[4701]: I1121 19:17:25.744424 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e387a880-c597-4dfc-9c86-843b719a31b4-dns-svc\") pod \"dnsmasq-dns-866784dbf-w5cpz\" (UID: \"e387a880-c597-4dfc-9c86-843b719a31b4\") " pod="openstack/dnsmasq-dns-866784dbf-w5cpz" Nov 21 19:17:25 crc kubenswrapper[4701]: I1121 19:17:25.744489 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bhvr9\" (UniqueName: \"kubernetes.io/projected/e387a880-c597-4dfc-9c86-843b719a31b4-kube-api-access-bhvr9\") pod \"dnsmasq-dns-866784dbf-w5cpz\" (UID: \"e387a880-c597-4dfc-9c86-843b719a31b4\") " pod="openstack/dnsmasq-dns-866784dbf-w5cpz" Nov 21 19:17:25 crc kubenswrapper[4701]: I1121 19:17:25.744536 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/272c7d8c-0292-4222-882d-c0c674c604b8-config\") pod \"dnsmasq-dns-5bd759bbbf-h4thw\" (UID: \"272c7d8c-0292-4222-882d-c0c674c604b8\") " pod="openstack/dnsmasq-dns-5bd759bbbf-h4thw" Nov 
21 19:17:25 crc kubenswrapper[4701]: I1121 19:17:25.744585 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e387a880-c597-4dfc-9c86-843b719a31b4-config\") pod \"dnsmasq-dns-866784dbf-w5cpz\" (UID: \"e387a880-c597-4dfc-9c86-843b719a31b4\") " pod="openstack/dnsmasq-dns-866784dbf-w5cpz" Nov 21 19:17:25 crc kubenswrapper[4701]: I1121 19:17:25.744660 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-527w4\" (UniqueName: \"kubernetes.io/projected/272c7d8c-0292-4222-882d-c0c674c604b8-kube-api-access-527w4\") pod \"dnsmasq-dns-5bd759bbbf-h4thw\" (UID: \"272c7d8c-0292-4222-882d-c0c674c604b8\") " pod="openstack/dnsmasq-dns-5bd759bbbf-h4thw" Nov 21 19:17:25 crc kubenswrapper[4701]: I1121 19:17:25.746101 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/272c7d8c-0292-4222-882d-c0c674c604b8-config\") pod \"dnsmasq-dns-5bd759bbbf-h4thw\" (UID: \"272c7d8c-0292-4222-882d-c0c674c604b8\") " pod="openstack/dnsmasq-dns-5bd759bbbf-h4thw" Nov 21 19:17:25 crc kubenswrapper[4701]: I1121 19:17:25.746390 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e387a880-c597-4dfc-9c86-843b719a31b4-dns-svc\") pod \"dnsmasq-dns-866784dbf-w5cpz\" (UID: \"e387a880-c597-4dfc-9c86-843b719a31b4\") " pod="openstack/dnsmasq-dns-866784dbf-w5cpz" Nov 21 19:17:25 crc kubenswrapper[4701]: I1121 19:17:25.746764 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e387a880-c597-4dfc-9c86-843b719a31b4-config\") pod \"dnsmasq-dns-866784dbf-w5cpz\" (UID: \"e387a880-c597-4dfc-9c86-843b719a31b4\") " pod="openstack/dnsmasq-dns-866784dbf-w5cpz" Nov 21 19:17:25 crc kubenswrapper[4701]: I1121 19:17:25.766745 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bhvr9\" (UniqueName: \"kubernetes.io/projected/e387a880-c597-4dfc-9c86-843b719a31b4-kube-api-access-bhvr9\") pod \"dnsmasq-dns-866784dbf-w5cpz\" (UID: \"e387a880-c597-4dfc-9c86-843b719a31b4\") " pod="openstack/dnsmasq-dns-866784dbf-w5cpz" Nov 21 19:17:25 crc kubenswrapper[4701]: I1121 19:17:25.770797 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-527w4\" (UniqueName: \"kubernetes.io/projected/272c7d8c-0292-4222-882d-c0c674c604b8-kube-api-access-527w4\") pod \"dnsmasq-dns-5bd759bbbf-h4thw\" (UID: \"272c7d8c-0292-4222-882d-c0c674c604b8\") " pod="openstack/dnsmasq-dns-5bd759bbbf-h4thw" Nov 21 19:17:25 crc kubenswrapper[4701]: I1121 19:17:25.779064 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bd759bbbf-h4thw" Nov 21 19:17:25 crc kubenswrapper[4701]: I1121 19:17:25.852000 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-866784dbf-w5cpz" Nov 21 19:17:26 crc kubenswrapper[4701]: I1121 19:17:26.163601 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-866784dbf-w5cpz"] Nov 21 19:17:26 crc kubenswrapper[4701]: I1121 19:17:26.266500 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5bd759bbbf-h4thw"] Nov 21 19:17:26 crc kubenswrapper[4701]: W1121 19:17:26.272490 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod272c7d8c_0292_4222_882d_c0c674c604b8.slice/crio-fea280aff9737045f8e9fedca8823c94cb23674e23a60b939a255cace12c9ca9 WatchSource:0}: Error finding container fea280aff9737045f8e9fedca8823c94cb23674e23a60b939a255cace12c9ca9: Status 404 returned error can't find the container with id fea280aff9737045f8e9fedca8823c94cb23674e23a60b939a255cace12c9ca9 Nov 21 19:17:26 crc kubenswrapper[4701]: I1121 19:17:26.469771 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-866784dbf-w5cpz" event={"ID":"e387a880-c597-4dfc-9c86-843b719a31b4","Type":"ContainerStarted","Data":"adfa7f76e1c151dcab7bca305e2efe928969ffbce722921751fc65a52bdbb029"} Nov 21 19:17:26 crc kubenswrapper[4701]: I1121 19:17:26.471146 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bd759bbbf-h4thw" event={"ID":"272c7d8c-0292-4222-882d-c0c674c604b8","Type":"ContainerStarted","Data":"fea280aff9737045f8e9fedca8823c94cb23674e23a60b939a255cace12c9ca9"} Nov 21 19:17:29 crc kubenswrapper[4701]: I1121 19:17:29.633687 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bd759bbbf-h4thw"] Nov 21 19:17:29 crc kubenswrapper[4701]: I1121 19:17:29.669660 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-56d4f587b9-g9fx9"] Nov 21 19:17:29 crc kubenswrapper[4701]: I1121 19:17:29.672525 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-56d4f587b9-g9fx9" Nov 21 19:17:29 crc kubenswrapper[4701]: I1121 19:17:29.686987 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56d4f587b9-g9fx9"] Nov 21 19:17:29 crc kubenswrapper[4701]: I1121 19:17:29.814266 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/430bf048-144e-44fa-970e-10b09547c277-config\") pod \"dnsmasq-dns-56d4f587b9-g9fx9\" (UID: \"430bf048-144e-44fa-970e-10b09547c277\") " pod="openstack/dnsmasq-dns-56d4f587b9-g9fx9" Nov 21 19:17:29 crc kubenswrapper[4701]: I1121 19:17:29.814329 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zddxr\" (UniqueName: \"kubernetes.io/projected/430bf048-144e-44fa-970e-10b09547c277-kube-api-access-zddxr\") pod \"dnsmasq-dns-56d4f587b9-g9fx9\" (UID: \"430bf048-144e-44fa-970e-10b09547c277\") " pod="openstack/dnsmasq-dns-56d4f587b9-g9fx9" Nov 21 19:17:29 crc kubenswrapper[4701]: I1121 19:17:29.814666 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/430bf048-144e-44fa-970e-10b09547c277-dns-svc\") pod \"dnsmasq-dns-56d4f587b9-g9fx9\" (UID: \"430bf048-144e-44fa-970e-10b09547c277\") " pod="openstack/dnsmasq-dns-56d4f587b9-g9fx9" Nov 21 19:17:29 crc kubenswrapper[4701]: I1121 19:17:29.916408 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zddxr\" (UniqueName: \"kubernetes.io/projected/430bf048-144e-44fa-970e-10b09547c277-kube-api-access-zddxr\") pod \"dnsmasq-dns-56d4f587b9-g9fx9\" (UID: \"430bf048-144e-44fa-970e-10b09547c277\") " pod="openstack/dnsmasq-dns-56d4f587b9-g9fx9" Nov 21 19:17:29 crc kubenswrapper[4701]: I1121 19:17:29.916473 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/430bf048-144e-44fa-970e-10b09547c277-config\") pod \"dnsmasq-dns-56d4f587b9-g9fx9\" (UID: \"430bf048-144e-44fa-970e-10b09547c277\") " pod="openstack/dnsmasq-dns-56d4f587b9-g9fx9" Nov 21 19:17:29 crc kubenswrapper[4701]: I1121 19:17:29.916559 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/430bf048-144e-44fa-970e-10b09547c277-dns-svc\") pod \"dnsmasq-dns-56d4f587b9-g9fx9\" (UID: \"430bf048-144e-44fa-970e-10b09547c277\") " pod="openstack/dnsmasq-dns-56d4f587b9-g9fx9" Nov 21 19:17:29 crc kubenswrapper[4701]: I1121 19:17:29.917670 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/430bf048-144e-44fa-970e-10b09547c277-dns-svc\") pod \"dnsmasq-dns-56d4f587b9-g9fx9\" (UID: \"430bf048-144e-44fa-970e-10b09547c277\") " pod="openstack/dnsmasq-dns-56d4f587b9-g9fx9" Nov 21 19:17:29 crc kubenswrapper[4701]: I1121 19:17:29.917695 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/430bf048-144e-44fa-970e-10b09547c277-config\") pod \"dnsmasq-dns-56d4f587b9-g9fx9\" (UID: \"430bf048-144e-44fa-970e-10b09547c277\") " pod="openstack/dnsmasq-dns-56d4f587b9-g9fx9" Nov 21 19:17:29 crc kubenswrapper[4701]: I1121 19:17:29.931852 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-866784dbf-w5cpz"] Nov 21 19:17:29 crc kubenswrapper[4701]: I1121 19:17:29.943856 
4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zddxr\" (UniqueName: \"kubernetes.io/projected/430bf048-144e-44fa-970e-10b09547c277-kube-api-access-zddxr\") pod \"dnsmasq-dns-56d4f587b9-g9fx9\" (UID: \"430bf048-144e-44fa-970e-10b09547c277\") " pod="openstack/dnsmasq-dns-56d4f587b9-g9fx9" Nov 21 19:17:29 crc kubenswrapper[4701]: I1121 19:17:29.966132 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6d7d677589-c9bgz"] Nov 21 19:17:29 crc kubenswrapper[4701]: I1121 19:17:29.968460 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d7d677589-c9bgz" Nov 21 19:17:29 crc kubenswrapper[4701]: I1121 19:17:29.981896 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d7d677589-c9bgz"] Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.000996 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56d4f587b9-g9fx9" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.122459 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wrdmv\" (UniqueName: \"kubernetes.io/projected/cc52531a-0d4e-44e4-8f16-2aca77ecaa02-kube-api-access-wrdmv\") pod \"dnsmasq-dns-6d7d677589-c9bgz\" (UID: \"cc52531a-0d4e-44e4-8f16-2aca77ecaa02\") " pod="openstack/dnsmasq-dns-6d7d677589-c9bgz" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.122564 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cc52531a-0d4e-44e4-8f16-2aca77ecaa02-dns-svc\") pod \"dnsmasq-dns-6d7d677589-c9bgz\" (UID: \"cc52531a-0d4e-44e4-8f16-2aca77ecaa02\") " pod="openstack/dnsmasq-dns-6d7d677589-c9bgz" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.122594 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc52531a-0d4e-44e4-8f16-2aca77ecaa02-config\") pod \"dnsmasq-dns-6d7d677589-c9bgz\" (UID: \"cc52531a-0d4e-44e4-8f16-2aca77ecaa02\") " pod="openstack/dnsmasq-dns-6d7d677589-c9bgz" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.227970 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wrdmv\" (UniqueName: \"kubernetes.io/projected/cc52531a-0d4e-44e4-8f16-2aca77ecaa02-kube-api-access-wrdmv\") pod \"dnsmasq-dns-6d7d677589-c9bgz\" (UID: \"cc52531a-0d4e-44e4-8f16-2aca77ecaa02\") " pod="openstack/dnsmasq-dns-6d7d677589-c9bgz" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.228036 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cc52531a-0d4e-44e4-8f16-2aca77ecaa02-dns-svc\") pod \"dnsmasq-dns-6d7d677589-c9bgz\" (UID: \"cc52531a-0d4e-44e4-8f16-2aca77ecaa02\") " pod="openstack/dnsmasq-dns-6d7d677589-c9bgz" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.228068 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc52531a-0d4e-44e4-8f16-2aca77ecaa02-config\") pod \"dnsmasq-dns-6d7d677589-c9bgz\" (UID: \"cc52531a-0d4e-44e4-8f16-2aca77ecaa02\") " pod="openstack/dnsmasq-dns-6d7d677589-c9bgz" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.229235 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/cc52531a-0d4e-44e4-8f16-2aca77ecaa02-config\") pod \"dnsmasq-dns-6d7d677589-c9bgz\" (UID: \"cc52531a-0d4e-44e4-8f16-2aca77ecaa02\") " pod="openstack/dnsmasq-dns-6d7d677589-c9bgz" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.229393 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cc52531a-0d4e-44e4-8f16-2aca77ecaa02-dns-svc\") pod \"dnsmasq-dns-6d7d677589-c9bgz\" (UID: \"cc52531a-0d4e-44e4-8f16-2aca77ecaa02\") " pod="openstack/dnsmasq-dns-6d7d677589-c9bgz" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.247531 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wrdmv\" (UniqueName: \"kubernetes.io/projected/cc52531a-0d4e-44e4-8f16-2aca77ecaa02-kube-api-access-wrdmv\") pod \"dnsmasq-dns-6d7d677589-c9bgz\" (UID: \"cc52531a-0d4e-44e4-8f16-2aca77ecaa02\") " pod="openstack/dnsmasq-dns-6d7d677589-c9bgz" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.295738 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d7d677589-c9bgz" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.300651 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56d4f587b9-g9fx9"] Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.323173 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6684cc9dc7-878nj"] Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.324806 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6684cc9dc7-878nj" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.334196 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6684cc9dc7-878nj"] Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.431460 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2sklw\" (UniqueName: \"kubernetes.io/projected/a6170b01-4aa3-4d90-a317-764721c7e08c-kube-api-access-2sklw\") pod \"dnsmasq-dns-6684cc9dc7-878nj\" (UID: \"a6170b01-4aa3-4d90-a317-764721c7e08c\") " pod="openstack/dnsmasq-dns-6684cc9dc7-878nj" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.431529 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6170b01-4aa3-4d90-a317-764721c7e08c-config\") pod \"dnsmasq-dns-6684cc9dc7-878nj\" (UID: \"a6170b01-4aa3-4d90-a317-764721c7e08c\") " pod="openstack/dnsmasq-dns-6684cc9dc7-878nj" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.431588 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a6170b01-4aa3-4d90-a317-764721c7e08c-dns-svc\") pod \"dnsmasq-dns-6684cc9dc7-878nj\" (UID: \"a6170b01-4aa3-4d90-a317-764721c7e08c\") " pod="openstack/dnsmasq-dns-6684cc9dc7-878nj" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.532977 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2sklw\" (UniqueName: \"kubernetes.io/projected/a6170b01-4aa3-4d90-a317-764721c7e08c-kube-api-access-2sklw\") pod \"dnsmasq-dns-6684cc9dc7-878nj\" (UID: \"a6170b01-4aa3-4d90-a317-764721c7e08c\") " pod="openstack/dnsmasq-dns-6684cc9dc7-878nj" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.533062 4701 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6170b01-4aa3-4d90-a317-764721c7e08c-config\") pod \"dnsmasq-dns-6684cc9dc7-878nj\" (UID: \"a6170b01-4aa3-4d90-a317-764721c7e08c\") " pod="openstack/dnsmasq-dns-6684cc9dc7-878nj" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.533125 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a6170b01-4aa3-4d90-a317-764721c7e08c-dns-svc\") pod \"dnsmasq-dns-6684cc9dc7-878nj\" (UID: \"a6170b01-4aa3-4d90-a317-764721c7e08c\") " pod="openstack/dnsmasq-dns-6684cc9dc7-878nj" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.534146 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6170b01-4aa3-4d90-a317-764721c7e08c-config\") pod \"dnsmasq-dns-6684cc9dc7-878nj\" (UID: \"a6170b01-4aa3-4d90-a317-764721c7e08c\") " pod="openstack/dnsmasq-dns-6684cc9dc7-878nj" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.534392 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a6170b01-4aa3-4d90-a317-764721c7e08c-dns-svc\") pod \"dnsmasq-dns-6684cc9dc7-878nj\" (UID: \"a6170b01-4aa3-4d90-a317-764721c7e08c\") " pod="openstack/dnsmasq-dns-6684cc9dc7-878nj" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.550077 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2sklw\" (UniqueName: \"kubernetes.io/projected/a6170b01-4aa3-4d90-a317-764721c7e08c-kube-api-access-2sklw\") pod \"dnsmasq-dns-6684cc9dc7-878nj\" (UID: \"a6170b01-4aa3-4d90-a317-764721c7e08c\") " pod="openstack/dnsmasq-dns-6684cc9dc7-878nj" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.649133 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6684cc9dc7-878nj" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.811518 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.812959 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.815413 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.815879 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.816684 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.817536 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.817562 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-lj7hl" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.817534 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.820519 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.834998 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.940609 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " pod="openstack/rabbitmq-server-0" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.940691 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " pod="openstack/rabbitmq-server-0" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.940724 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " pod="openstack/rabbitmq-server-0" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.940764 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-config-data\") pod \"rabbitmq-server-0\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " pod="openstack/rabbitmq-server-0" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.940824 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " pod="openstack/rabbitmq-server-0" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.940843 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: 
\"kubernetes.io/projected/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " pod="openstack/rabbitmq-server-0" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.940911 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-82vq4\" (UniqueName: \"kubernetes.io/projected/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-kube-api-access-82vq4\") pod \"rabbitmq-server-0\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " pod="openstack/rabbitmq-server-0" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.941073 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-server-conf\") pod \"rabbitmq-server-0\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " pod="openstack/rabbitmq-server-0" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.941156 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " pod="openstack/rabbitmq-server-0" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.941300 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " pod="openstack/rabbitmq-server-0" Nov 21 19:17:30 crc kubenswrapper[4701]: I1121 19:17:30.941356 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-pod-info\") pod \"rabbitmq-server-0\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " pod="openstack/rabbitmq-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.042952 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " pod="openstack/rabbitmq-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.043028 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " pod="openstack/rabbitmq-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.043080 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-config-data\") pod \"rabbitmq-server-0\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " pod="openstack/rabbitmq-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.043140 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " 
pod="openstack/rabbitmq-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.043164 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " pod="openstack/rabbitmq-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.043313 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-82vq4\" (UniqueName: \"kubernetes.io/projected/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-kube-api-access-82vq4\") pod \"rabbitmq-server-0\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " pod="openstack/rabbitmq-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.043369 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-server-conf\") pod \"rabbitmq-server-0\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " pod="openstack/rabbitmq-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.043404 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " pod="openstack/rabbitmq-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.043465 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " pod="openstack/rabbitmq-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.043506 4701 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/rabbitmq-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.043520 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-pod-info\") pod \"rabbitmq-server-0\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " pod="openstack/rabbitmq-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.043934 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " pod="openstack/rabbitmq-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.044490 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-config-data\") pod \"rabbitmq-server-0\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " pod="openstack/rabbitmq-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.044781 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " pod="openstack/rabbitmq-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.045186 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " pod="openstack/rabbitmq-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.045438 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-server-conf\") pod \"rabbitmq-server-0\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " pod="openstack/rabbitmq-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.045698 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " pod="openstack/rabbitmq-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.049104 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " pod="openstack/rabbitmq-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.049331 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-pod-info\") pod \"rabbitmq-server-0\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " pod="openstack/rabbitmq-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.049511 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " pod="openstack/rabbitmq-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.060008 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " pod="openstack/rabbitmq-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.065970 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-82vq4\" (UniqueName: \"kubernetes.io/projected/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-kube-api-access-82vq4\") pod \"rabbitmq-server-0\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " pod="openstack/rabbitmq-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.088729 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " pod="openstack/rabbitmq-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.103724 4701 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/rabbitmq-cell1-server-0"] Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.105128 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.108620 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.110641 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.114318 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.114550 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.115569 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.120893 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-27bkv" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.123848 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.125107 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.139804 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.247786 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.248386 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.248427 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.248448 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.248477 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jn4kv\" 
(UniqueName: \"kubernetes.io/projected/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-kube-api-access-jn4kv\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.248504 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.248792 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.248892 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.248968 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.249083 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.249251 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.350976 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.351035 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.351075 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: 
\"kubernetes.io/downward-api/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.351104 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.351131 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.351160 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.351189 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.351234 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.351255 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.351278 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jn4kv\" (UniqueName: \"kubernetes.io/projected/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-kube-api-access-jn4kv\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.351304 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.351504 4701 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") 
device mount path \"/mnt/openstack/pv08\"" pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.352511 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.352621 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.353252 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.353544 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.353767 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.359762 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.359808 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.367110 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.372776 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jn4kv\" (UniqueName: \"kubernetes.io/projected/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-kube-api-access-jn4kv\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.377077 4701 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.382618 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.436742 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-notifications-server-0"] Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.440451 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-notifications-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.446609 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-notifications-plugins-conf" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.482760 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.482766 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-notifications-svc" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.483075 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-notifications-server-dockercfg-p5xtg" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.483277 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-notifications-config-data" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.483431 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-notifications-erlang-cookie" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.483462 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-notifications-server-conf" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.483646 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-notifications-default-user" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.494191 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-notifications-server-0"] Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.586386 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/fa567817-ce17-4cb3-9e55-e14902a96420-rabbitmq-plugins\") pod \"rabbitmq-notifications-server-0\" (UID: \"fa567817-ce17-4cb3-9e55-e14902a96420\") " pod="openstack/rabbitmq-notifications-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.586453 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-notifications-server-0\" (UID: \"fa567817-ce17-4cb3-9e55-e14902a96420\") " pod="openstack/rabbitmq-notifications-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.586498 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"server-conf\" (UniqueName: \"kubernetes.io/configmap/fa567817-ce17-4cb3-9e55-e14902a96420-server-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"fa567817-ce17-4cb3-9e55-e14902a96420\") " pod="openstack/rabbitmq-notifications-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.586566 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-55ngp\" (UniqueName: \"kubernetes.io/projected/fa567817-ce17-4cb3-9e55-e14902a96420-kube-api-access-55ngp\") pod \"rabbitmq-notifications-server-0\" (UID: \"fa567817-ce17-4cb3-9e55-e14902a96420\") " pod="openstack/rabbitmq-notifications-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.586622 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/fa567817-ce17-4cb3-9e55-e14902a96420-plugins-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"fa567817-ce17-4cb3-9e55-e14902a96420\") " pod="openstack/rabbitmq-notifications-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.586677 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/fa567817-ce17-4cb3-9e55-e14902a96420-rabbitmq-confd\") pod \"rabbitmq-notifications-server-0\" (UID: \"fa567817-ce17-4cb3-9e55-e14902a96420\") " pod="openstack/rabbitmq-notifications-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.586730 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/fa567817-ce17-4cb3-9e55-e14902a96420-erlang-cookie-secret\") pod \"rabbitmq-notifications-server-0\" (UID: \"fa567817-ce17-4cb3-9e55-e14902a96420\") " pod="openstack/rabbitmq-notifications-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.586766 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fa567817-ce17-4cb3-9e55-e14902a96420-config-data\") pod \"rabbitmq-notifications-server-0\" (UID: \"fa567817-ce17-4cb3-9e55-e14902a96420\") " pod="openstack/rabbitmq-notifications-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.586851 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/fa567817-ce17-4cb3-9e55-e14902a96420-rabbitmq-erlang-cookie\") pod \"rabbitmq-notifications-server-0\" (UID: \"fa567817-ce17-4cb3-9e55-e14902a96420\") " pod="openstack/rabbitmq-notifications-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.586898 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/fa567817-ce17-4cb3-9e55-e14902a96420-pod-info\") pod \"rabbitmq-notifications-server-0\" (UID: \"fa567817-ce17-4cb3-9e55-e14902a96420\") " pod="openstack/rabbitmq-notifications-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.586934 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/fa567817-ce17-4cb3-9e55-e14902a96420-rabbitmq-tls\") pod \"rabbitmq-notifications-server-0\" (UID: \"fa567817-ce17-4cb3-9e55-e14902a96420\") " 
pod="openstack/rabbitmq-notifications-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.689101 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/fa567817-ce17-4cb3-9e55-e14902a96420-rabbitmq-plugins\") pod \"rabbitmq-notifications-server-0\" (UID: \"fa567817-ce17-4cb3-9e55-e14902a96420\") " pod="openstack/rabbitmq-notifications-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.689152 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/fa567817-ce17-4cb3-9e55-e14902a96420-server-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"fa567817-ce17-4cb3-9e55-e14902a96420\") " pod="openstack/rabbitmq-notifications-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.689181 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-notifications-server-0\" (UID: \"fa567817-ce17-4cb3-9e55-e14902a96420\") " pod="openstack/rabbitmq-notifications-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.689240 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-55ngp\" (UniqueName: \"kubernetes.io/projected/fa567817-ce17-4cb3-9e55-e14902a96420-kube-api-access-55ngp\") pod \"rabbitmq-notifications-server-0\" (UID: \"fa567817-ce17-4cb3-9e55-e14902a96420\") " pod="openstack/rabbitmq-notifications-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.689282 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/fa567817-ce17-4cb3-9e55-e14902a96420-plugins-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"fa567817-ce17-4cb3-9e55-e14902a96420\") " pod="openstack/rabbitmq-notifications-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.689303 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/fa567817-ce17-4cb3-9e55-e14902a96420-rabbitmq-confd\") pod \"rabbitmq-notifications-server-0\" (UID: \"fa567817-ce17-4cb3-9e55-e14902a96420\") " pod="openstack/rabbitmq-notifications-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.689329 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/fa567817-ce17-4cb3-9e55-e14902a96420-erlang-cookie-secret\") pod \"rabbitmq-notifications-server-0\" (UID: \"fa567817-ce17-4cb3-9e55-e14902a96420\") " pod="openstack/rabbitmq-notifications-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.689350 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fa567817-ce17-4cb3-9e55-e14902a96420-config-data\") pod \"rabbitmq-notifications-server-0\" (UID: \"fa567817-ce17-4cb3-9e55-e14902a96420\") " pod="openstack/rabbitmq-notifications-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.689404 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/fa567817-ce17-4cb3-9e55-e14902a96420-rabbitmq-erlang-cookie\") pod \"rabbitmq-notifications-server-0\" (UID: 
\"fa567817-ce17-4cb3-9e55-e14902a96420\") " pod="openstack/rabbitmq-notifications-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.689433 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/fa567817-ce17-4cb3-9e55-e14902a96420-pod-info\") pod \"rabbitmq-notifications-server-0\" (UID: \"fa567817-ce17-4cb3-9e55-e14902a96420\") " pod="openstack/rabbitmq-notifications-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.689455 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/fa567817-ce17-4cb3-9e55-e14902a96420-rabbitmq-tls\") pod \"rabbitmq-notifications-server-0\" (UID: \"fa567817-ce17-4cb3-9e55-e14902a96420\") " pod="openstack/rabbitmq-notifications-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.691573 4701 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-notifications-server-0\" (UID: \"fa567817-ce17-4cb3-9e55-e14902a96420\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/rabbitmq-notifications-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.691790 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/fa567817-ce17-4cb3-9e55-e14902a96420-plugins-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"fa567817-ce17-4cb3-9e55-e14902a96420\") " pod="openstack/rabbitmq-notifications-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.691880 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/fa567817-ce17-4cb3-9e55-e14902a96420-rabbitmq-erlang-cookie\") pod \"rabbitmq-notifications-server-0\" (UID: \"fa567817-ce17-4cb3-9e55-e14902a96420\") " pod="openstack/rabbitmq-notifications-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.692618 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fa567817-ce17-4cb3-9e55-e14902a96420-config-data\") pod \"rabbitmq-notifications-server-0\" (UID: \"fa567817-ce17-4cb3-9e55-e14902a96420\") " pod="openstack/rabbitmq-notifications-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.694681 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/fa567817-ce17-4cb3-9e55-e14902a96420-rabbitmq-confd\") pod \"rabbitmq-notifications-server-0\" (UID: \"fa567817-ce17-4cb3-9e55-e14902a96420\") " pod="openstack/rabbitmq-notifications-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.694966 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/fa567817-ce17-4cb3-9e55-e14902a96420-server-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"fa567817-ce17-4cb3-9e55-e14902a96420\") " pod="openstack/rabbitmq-notifications-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.694989 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/fa567817-ce17-4cb3-9e55-e14902a96420-rabbitmq-plugins\") pod \"rabbitmq-notifications-server-0\" (UID: \"fa567817-ce17-4cb3-9e55-e14902a96420\") 
" pod="openstack/rabbitmq-notifications-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.699896 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/fa567817-ce17-4cb3-9e55-e14902a96420-erlang-cookie-secret\") pod \"rabbitmq-notifications-server-0\" (UID: \"fa567817-ce17-4cb3-9e55-e14902a96420\") " pod="openstack/rabbitmq-notifications-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.705034 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/fa567817-ce17-4cb3-9e55-e14902a96420-rabbitmq-tls\") pod \"rabbitmq-notifications-server-0\" (UID: \"fa567817-ce17-4cb3-9e55-e14902a96420\") " pod="openstack/rabbitmq-notifications-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.707619 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/fa567817-ce17-4cb3-9e55-e14902a96420-pod-info\") pod \"rabbitmq-notifications-server-0\" (UID: \"fa567817-ce17-4cb3-9e55-e14902a96420\") " pod="openstack/rabbitmq-notifications-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.717431 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-55ngp\" (UniqueName: \"kubernetes.io/projected/fa567817-ce17-4cb3-9e55-e14902a96420-kube-api-access-55ngp\") pod \"rabbitmq-notifications-server-0\" (UID: \"fa567817-ce17-4cb3-9e55-e14902a96420\") " pod="openstack/rabbitmq-notifications-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.718552 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-notifications-server-0\" (UID: \"fa567817-ce17-4cb3-9e55-e14902a96420\") " pod="openstack/rabbitmq-notifications-server-0" Nov 21 19:17:31 crc kubenswrapper[4701]: I1121 19:17:31.799584 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-notifications-server-0" Nov 21 19:17:33 crc kubenswrapper[4701]: I1121 19:17:33.172722 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Nov 21 19:17:33 crc kubenswrapper[4701]: I1121 19:17:33.174937 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 21 19:17:33 crc kubenswrapper[4701]: I1121 19:17:33.180271 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 21 19:17:33 crc kubenswrapper[4701]: I1121 19:17:33.180909 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 21 19:17:33 crc kubenswrapper[4701]: I1121 19:17:33.182452 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-pknb6" Nov 21 19:17:33 crc kubenswrapper[4701]: I1121 19:17:33.188153 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 21 19:17:33 crc kubenswrapper[4701]: I1121 19:17:33.188492 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 21 19:17:33 crc kubenswrapper[4701]: I1121 19:17:33.196998 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 21 19:17:33 crc kubenswrapper[4701]: I1121 19:17:33.329406 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/117bcee4-5190-4738-8e03-19f77f4fb428-kolla-config\") pod \"openstack-galera-0\" (UID: \"117bcee4-5190-4738-8e03-19f77f4fb428\") " pod="openstack/openstack-galera-0" Nov 21 19:17:33 crc kubenswrapper[4701]: I1121 19:17:33.329513 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/117bcee4-5190-4738-8e03-19f77f4fb428-config-data-default\") pod \"openstack-galera-0\" (UID: \"117bcee4-5190-4738-8e03-19f77f4fb428\") " pod="openstack/openstack-galera-0" Nov 21 19:17:33 crc kubenswrapper[4701]: I1121 19:17:33.329553 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/117bcee4-5190-4738-8e03-19f77f4fb428-operator-scripts\") pod \"openstack-galera-0\" (UID: \"117bcee4-5190-4738-8e03-19f77f4fb428\") " pod="openstack/openstack-galera-0" Nov 21 19:17:33 crc kubenswrapper[4701]: I1121 19:17:33.329576 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m2r2s\" (UniqueName: \"kubernetes.io/projected/117bcee4-5190-4738-8e03-19f77f4fb428-kube-api-access-m2r2s\") pod \"openstack-galera-0\" (UID: \"117bcee4-5190-4738-8e03-19f77f4fb428\") " pod="openstack/openstack-galera-0" Nov 21 19:17:33 crc kubenswrapper[4701]: I1121 19:17:33.329616 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"117bcee4-5190-4738-8e03-19f77f4fb428\") " pod="openstack/openstack-galera-0" Nov 21 19:17:33 crc kubenswrapper[4701]: I1121 19:17:33.329662 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/117bcee4-5190-4738-8e03-19f77f4fb428-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"117bcee4-5190-4738-8e03-19f77f4fb428\") " pod="openstack/openstack-galera-0" Nov 21 19:17:33 crc kubenswrapper[4701]: I1121 19:17:33.329686 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/117bcee4-5190-4738-8e03-19f77f4fb428-config-data-generated\") pod \"openstack-galera-0\" (UID: \"117bcee4-5190-4738-8e03-19f77f4fb428\") " pod="openstack/openstack-galera-0" Nov 21 19:17:33 crc kubenswrapper[4701]: I1121 19:17:33.331523 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/117bcee4-5190-4738-8e03-19f77f4fb428-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"117bcee4-5190-4738-8e03-19f77f4fb428\") " pod="openstack/openstack-galera-0" Nov 21 19:17:33 crc kubenswrapper[4701]: I1121 19:17:33.434796 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/117bcee4-5190-4738-8e03-19f77f4fb428-kolla-config\") pod \"openstack-galera-0\" (UID: \"117bcee4-5190-4738-8e03-19f77f4fb428\") " pod="openstack/openstack-galera-0" Nov 21 19:17:33 crc kubenswrapper[4701]: I1121 19:17:33.433737 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/117bcee4-5190-4738-8e03-19f77f4fb428-kolla-config\") pod \"openstack-galera-0\" (UID: \"117bcee4-5190-4738-8e03-19f77f4fb428\") " pod="openstack/openstack-galera-0" Nov 21 19:17:33 crc kubenswrapper[4701]: I1121 19:17:33.434944 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/117bcee4-5190-4738-8e03-19f77f4fb428-config-data-default\") pod \"openstack-galera-0\" (UID: \"117bcee4-5190-4738-8e03-19f77f4fb428\") " pod="openstack/openstack-galera-0" Nov 21 19:17:33 crc kubenswrapper[4701]: I1121 19:17:33.435894 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/117bcee4-5190-4738-8e03-19f77f4fb428-config-data-default\") pod \"openstack-galera-0\" (UID: \"117bcee4-5190-4738-8e03-19f77f4fb428\") " pod="openstack/openstack-galera-0" Nov 21 19:17:33 crc kubenswrapper[4701]: I1121 19:17:33.434973 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/117bcee4-5190-4738-8e03-19f77f4fb428-operator-scripts\") pod \"openstack-galera-0\" (UID: \"117bcee4-5190-4738-8e03-19f77f4fb428\") " pod="openstack/openstack-galera-0" Nov 21 19:17:33 crc kubenswrapper[4701]: I1121 19:17:33.435988 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m2r2s\" (UniqueName: \"kubernetes.io/projected/117bcee4-5190-4738-8e03-19f77f4fb428-kube-api-access-m2r2s\") pod \"openstack-galera-0\" (UID: \"117bcee4-5190-4738-8e03-19f77f4fb428\") " pod="openstack/openstack-galera-0" Nov 21 19:17:33 crc kubenswrapper[4701]: I1121 19:17:33.436028 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"117bcee4-5190-4738-8e03-19f77f4fb428\") " pod="openstack/openstack-galera-0" Nov 21 19:17:33 crc kubenswrapper[4701]: I1121 19:17:33.436076 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/117bcee4-5190-4738-8e03-19f77f4fb428-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"117bcee4-5190-4738-8e03-19f77f4fb428\") " 
pod="openstack/openstack-galera-0" Nov 21 19:17:33 crc kubenswrapper[4701]: I1121 19:17:33.436105 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/117bcee4-5190-4738-8e03-19f77f4fb428-config-data-generated\") pod \"openstack-galera-0\" (UID: \"117bcee4-5190-4738-8e03-19f77f4fb428\") " pod="openstack/openstack-galera-0" Nov 21 19:17:33 crc kubenswrapper[4701]: I1121 19:17:33.436150 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/117bcee4-5190-4738-8e03-19f77f4fb428-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"117bcee4-5190-4738-8e03-19f77f4fb428\") " pod="openstack/openstack-galera-0" Nov 21 19:17:33 crc kubenswrapper[4701]: I1121 19:17:33.436582 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/117bcee4-5190-4738-8e03-19f77f4fb428-operator-scripts\") pod \"openstack-galera-0\" (UID: \"117bcee4-5190-4738-8e03-19f77f4fb428\") " pod="openstack/openstack-galera-0" Nov 21 19:17:33 crc kubenswrapper[4701]: I1121 19:17:33.436925 4701 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"117bcee4-5190-4738-8e03-19f77f4fb428\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/openstack-galera-0" Nov 21 19:17:33 crc kubenswrapper[4701]: I1121 19:17:33.437301 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/117bcee4-5190-4738-8e03-19f77f4fb428-config-data-generated\") pod \"openstack-galera-0\" (UID: \"117bcee4-5190-4738-8e03-19f77f4fb428\") " pod="openstack/openstack-galera-0" Nov 21 19:17:33 crc kubenswrapper[4701]: I1121 19:17:33.445975 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/117bcee4-5190-4738-8e03-19f77f4fb428-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"117bcee4-5190-4738-8e03-19f77f4fb428\") " pod="openstack/openstack-galera-0" Nov 21 19:17:33 crc kubenswrapper[4701]: I1121 19:17:33.457153 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/117bcee4-5190-4738-8e03-19f77f4fb428-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"117bcee4-5190-4738-8e03-19f77f4fb428\") " pod="openstack/openstack-galera-0" Nov 21 19:17:33 crc kubenswrapper[4701]: I1121 19:17:33.459363 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m2r2s\" (UniqueName: \"kubernetes.io/projected/117bcee4-5190-4738-8e03-19f77f4fb428-kube-api-access-m2r2s\") pod \"openstack-galera-0\" (UID: \"117bcee4-5190-4738-8e03-19f77f4fb428\") " pod="openstack/openstack-galera-0" Nov 21 19:17:33 crc kubenswrapper[4701]: I1121 19:17:33.462812 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"117bcee4-5190-4738-8e03-19f77f4fb428\") " pod="openstack/openstack-galera-0" Nov 21 19:17:33 crc kubenswrapper[4701]: I1121 19:17:33.504251 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 19:17:34.446790 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 19:17:34.449258 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 19:17:34.454432 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-pb9pg" Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 19:17:34.454486 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 19:17:34.454502 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 19:17:34.456373 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 19:17:34.475973 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 19:17:34.592491 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b6432247-ed58-4dce-98d4-4267d0122151-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"b6432247-ed58-4dce-98d4-4267d0122151\") " pod="openstack/openstack-cell1-galera-0" Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 19:17:34.592589 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b6432247-ed58-4dce-98d4-4267d0122151-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"b6432247-ed58-4dce-98d4-4267d0122151\") " pod="openstack/openstack-cell1-galera-0" Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 19:17:34.592647 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-cell1-galera-0\" (UID: \"b6432247-ed58-4dce-98d4-4267d0122151\") " pod="openstack/openstack-cell1-galera-0" Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 19:17:34.592767 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6432247-ed58-4dce-98d4-4267d0122151-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"b6432247-ed58-4dce-98d4-4267d0122151\") " pod="openstack/openstack-cell1-galera-0" Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 19:17:34.592851 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b6432247-ed58-4dce-98d4-4267d0122151-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"b6432247-ed58-4dce-98d4-4267d0122151\") " pod="openstack/openstack-cell1-galera-0" Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 19:17:34.592900 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6432247-ed58-4dce-98d4-4267d0122151-operator-scripts\") pod 
\"openstack-cell1-galera-0\" (UID: \"b6432247-ed58-4dce-98d4-4267d0122151\") " pod="openstack/openstack-cell1-galera-0" Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 19:17:34.592950 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-48kq5\" (UniqueName: \"kubernetes.io/projected/b6432247-ed58-4dce-98d4-4267d0122151-kube-api-access-48kq5\") pod \"openstack-cell1-galera-0\" (UID: \"b6432247-ed58-4dce-98d4-4267d0122151\") " pod="openstack/openstack-cell1-galera-0" Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 19:17:34.593021 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6432247-ed58-4dce-98d4-4267d0122151-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"b6432247-ed58-4dce-98d4-4267d0122151\") " pod="openstack/openstack-cell1-galera-0" Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 19:17:34.694299 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6432247-ed58-4dce-98d4-4267d0122151-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"b6432247-ed58-4dce-98d4-4267d0122151\") " pod="openstack/openstack-cell1-galera-0" Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 19:17:34.694353 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b6432247-ed58-4dce-98d4-4267d0122151-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"b6432247-ed58-4dce-98d4-4267d0122151\") " pod="openstack/openstack-cell1-galera-0" Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 19:17:34.694391 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6432247-ed58-4dce-98d4-4267d0122151-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"b6432247-ed58-4dce-98d4-4267d0122151\") " pod="openstack/openstack-cell1-galera-0" Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 19:17:34.694424 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-48kq5\" (UniqueName: \"kubernetes.io/projected/b6432247-ed58-4dce-98d4-4267d0122151-kube-api-access-48kq5\") pod \"openstack-cell1-galera-0\" (UID: \"b6432247-ed58-4dce-98d4-4267d0122151\") " pod="openstack/openstack-cell1-galera-0" Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 19:17:34.694464 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6432247-ed58-4dce-98d4-4267d0122151-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"b6432247-ed58-4dce-98d4-4267d0122151\") " pod="openstack/openstack-cell1-galera-0" Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 19:17:34.694523 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b6432247-ed58-4dce-98d4-4267d0122151-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"b6432247-ed58-4dce-98d4-4267d0122151\") " pod="openstack/openstack-cell1-galera-0" Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 19:17:34.694540 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b6432247-ed58-4dce-98d4-4267d0122151-config-data-default\") pod \"openstack-cell1-galera-0\" 
(UID: \"b6432247-ed58-4dce-98d4-4267d0122151\") " pod="openstack/openstack-cell1-galera-0" Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 19:17:34.694577 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-cell1-galera-0\" (UID: \"b6432247-ed58-4dce-98d4-4267d0122151\") " pod="openstack/openstack-cell1-galera-0" Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 19:17:34.694810 4701 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-cell1-galera-0\" (UID: \"b6432247-ed58-4dce-98d4-4267d0122151\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/openstack-cell1-galera-0" Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 19:17:34.695491 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b6432247-ed58-4dce-98d4-4267d0122151-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"b6432247-ed58-4dce-98d4-4267d0122151\") " pod="openstack/openstack-cell1-galera-0" Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 19:17:34.695668 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b6432247-ed58-4dce-98d4-4267d0122151-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"b6432247-ed58-4dce-98d4-4267d0122151\") " pod="openstack/openstack-cell1-galera-0" Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 19:17:34.695685 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b6432247-ed58-4dce-98d4-4267d0122151-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"b6432247-ed58-4dce-98d4-4267d0122151\") " pod="openstack/openstack-cell1-galera-0" Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 19:17:34.696949 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6432247-ed58-4dce-98d4-4267d0122151-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"b6432247-ed58-4dce-98d4-4267d0122151\") " pod="openstack/openstack-cell1-galera-0" Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 19:17:34.721246 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-48kq5\" (UniqueName: \"kubernetes.io/projected/b6432247-ed58-4dce-98d4-4267d0122151-kube-api-access-48kq5\") pod \"openstack-cell1-galera-0\" (UID: \"b6432247-ed58-4dce-98d4-4267d0122151\") " pod="openstack/openstack-cell1-galera-0" Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 19:17:34.723930 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6432247-ed58-4dce-98d4-4267d0122151-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"b6432247-ed58-4dce-98d4-4267d0122151\") " pod="openstack/openstack-cell1-galera-0" Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 19:17:34.757250 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6432247-ed58-4dce-98d4-4267d0122151-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"b6432247-ed58-4dce-98d4-4267d0122151\") " pod="openstack/openstack-cell1-galera-0" Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 
19:17:34.766751 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-cell1-galera-0\" (UID: \"b6432247-ed58-4dce-98d4-4267d0122151\") " pod="openstack/openstack-cell1-galera-0" Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 19:17:34.792125 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 19:17:34.872919 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 19:17:34.878447 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 19:17:34.881637 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 19:17:34.883866 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-vmmql" Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 19:17:34.891127 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 21 19:17:34 crc kubenswrapper[4701]: I1121 19:17:34.892234 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 21 19:17:35 crc kubenswrapper[4701]: I1121 19:17:35.003994 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zrxmp\" (UniqueName: \"kubernetes.io/projected/2d891f55-8791-487d-b8f9-b4183da3e720-kube-api-access-zrxmp\") pod \"memcached-0\" (UID: \"2d891f55-8791-487d-b8f9-b4183da3e720\") " pod="openstack/memcached-0" Nov 21 19:17:35 crc kubenswrapper[4701]: I1121 19:17:35.004109 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2d891f55-8791-487d-b8f9-b4183da3e720-config-data\") pod \"memcached-0\" (UID: \"2d891f55-8791-487d-b8f9-b4183da3e720\") " pod="openstack/memcached-0" Nov 21 19:17:35 crc kubenswrapper[4701]: I1121 19:17:35.004133 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d891f55-8791-487d-b8f9-b4183da3e720-combined-ca-bundle\") pod \"memcached-0\" (UID: \"2d891f55-8791-487d-b8f9-b4183da3e720\") " pod="openstack/memcached-0" Nov 21 19:17:35 crc kubenswrapper[4701]: I1121 19:17:35.004158 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/2d891f55-8791-487d-b8f9-b4183da3e720-memcached-tls-certs\") pod \"memcached-0\" (UID: \"2d891f55-8791-487d-b8f9-b4183da3e720\") " pod="openstack/memcached-0" Nov 21 19:17:35 crc kubenswrapper[4701]: I1121 19:17:35.004185 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2d891f55-8791-487d-b8f9-b4183da3e720-kolla-config\") pod \"memcached-0\" (UID: \"2d891f55-8791-487d-b8f9-b4183da3e720\") " pod="openstack/memcached-0" Nov 21 19:17:35 crc kubenswrapper[4701]: I1121 19:17:35.108510 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/2d891f55-8791-487d-b8f9-b4183da3e720-config-data\") pod \"memcached-0\" (UID: \"2d891f55-8791-487d-b8f9-b4183da3e720\") " pod="openstack/memcached-0" Nov 21 19:17:35 crc kubenswrapper[4701]: I1121 19:17:35.108589 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d891f55-8791-487d-b8f9-b4183da3e720-combined-ca-bundle\") pod \"memcached-0\" (UID: \"2d891f55-8791-487d-b8f9-b4183da3e720\") " pod="openstack/memcached-0" Nov 21 19:17:35 crc kubenswrapper[4701]: I1121 19:17:35.108660 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/2d891f55-8791-487d-b8f9-b4183da3e720-memcached-tls-certs\") pod \"memcached-0\" (UID: \"2d891f55-8791-487d-b8f9-b4183da3e720\") " pod="openstack/memcached-0" Nov 21 19:17:35 crc kubenswrapper[4701]: I1121 19:17:35.108701 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2d891f55-8791-487d-b8f9-b4183da3e720-kolla-config\") pod \"memcached-0\" (UID: \"2d891f55-8791-487d-b8f9-b4183da3e720\") " pod="openstack/memcached-0" Nov 21 19:17:35 crc kubenswrapper[4701]: I1121 19:17:35.108733 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zrxmp\" (UniqueName: \"kubernetes.io/projected/2d891f55-8791-487d-b8f9-b4183da3e720-kube-api-access-zrxmp\") pod \"memcached-0\" (UID: \"2d891f55-8791-487d-b8f9-b4183da3e720\") " pod="openstack/memcached-0" Nov 21 19:17:35 crc kubenswrapper[4701]: I1121 19:17:35.109989 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2d891f55-8791-487d-b8f9-b4183da3e720-kolla-config\") pod \"memcached-0\" (UID: \"2d891f55-8791-487d-b8f9-b4183da3e720\") " pod="openstack/memcached-0" Nov 21 19:17:35 crc kubenswrapper[4701]: I1121 19:17:35.111328 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2d891f55-8791-487d-b8f9-b4183da3e720-config-data\") pod \"memcached-0\" (UID: \"2d891f55-8791-487d-b8f9-b4183da3e720\") " pod="openstack/memcached-0" Nov 21 19:17:35 crc kubenswrapper[4701]: I1121 19:17:35.113950 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/2d891f55-8791-487d-b8f9-b4183da3e720-memcached-tls-certs\") pod \"memcached-0\" (UID: \"2d891f55-8791-487d-b8f9-b4183da3e720\") " pod="openstack/memcached-0" Nov 21 19:17:35 crc kubenswrapper[4701]: I1121 19:17:35.131646 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zrxmp\" (UniqueName: \"kubernetes.io/projected/2d891f55-8791-487d-b8f9-b4183da3e720-kube-api-access-zrxmp\") pod \"memcached-0\" (UID: \"2d891f55-8791-487d-b8f9-b4183da3e720\") " pod="openstack/memcached-0" Nov 21 19:17:35 crc kubenswrapper[4701]: I1121 19:17:35.141040 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d891f55-8791-487d-b8f9-b4183da3e720-combined-ca-bundle\") pod \"memcached-0\" (UID: \"2d891f55-8791-487d-b8f9-b4183da3e720\") " pod="openstack/memcached-0" Nov 21 19:17:35 crc kubenswrapper[4701]: I1121 19:17:35.201540 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Nov 21 19:17:37 crc kubenswrapper[4701]: I1121 19:17:37.036919 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 21 19:17:37 crc kubenswrapper[4701]: I1121 19:17:37.041819 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 21 19:17:37 crc kubenswrapper[4701]: I1121 19:17:37.046058 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-d5sbr" Nov 21 19:17:37 crc kubenswrapper[4701]: I1121 19:17:37.048442 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jmr4d\" (UniqueName: \"kubernetes.io/projected/600d7142-cf1a-4e30-968d-5b75a572085d-kube-api-access-jmr4d\") pod \"kube-state-metrics-0\" (UID: \"600d7142-cf1a-4e30-968d-5b75a572085d\") " pod="openstack/kube-state-metrics-0" Nov 21 19:17:37 crc kubenswrapper[4701]: I1121 19:17:37.065324 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 21 19:17:37 crc kubenswrapper[4701]: I1121 19:17:37.150111 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jmr4d\" (UniqueName: \"kubernetes.io/projected/600d7142-cf1a-4e30-968d-5b75a572085d-kube-api-access-jmr4d\") pod \"kube-state-metrics-0\" (UID: \"600d7142-cf1a-4e30-968d-5b75a572085d\") " pod="openstack/kube-state-metrics-0" Nov 21 19:17:37 crc kubenswrapper[4701]: I1121 19:17:37.199653 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jmr4d\" (UniqueName: \"kubernetes.io/projected/600d7142-cf1a-4e30-968d-5b75a572085d-kube-api-access-jmr4d\") pod \"kube-state-metrics-0\" (UID: \"600d7142-cf1a-4e30-968d-5b75a572085d\") " pod="openstack/kube-state-metrics-0" Nov 21 19:17:37 crc kubenswrapper[4701]: I1121 19:17:37.362448 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 21 19:17:38 crc kubenswrapper[4701]: I1121 19:17:38.404451 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 21 19:17:38 crc kubenswrapper[4701]: I1121 19:17:38.408976 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 21 19:17:38 crc kubenswrapper[4701]: I1121 19:17:38.416702 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Nov 21 19:17:38 crc kubenswrapper[4701]: I1121 19:17:38.419885 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Nov 21 19:17:38 crc kubenswrapper[4701]: I1121 19:17:38.420321 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-p9vfl" Nov 21 19:17:38 crc kubenswrapper[4701]: I1121 19:17:38.420697 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Nov 21 19:17:38 crc kubenswrapper[4701]: I1121 19:17:38.422175 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Nov 21 19:17:38 crc kubenswrapper[4701]: I1121 19:17:38.423213 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 21 19:17:38 crc kubenswrapper[4701]: I1121 19:17:38.427581 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Nov 21 19:17:38 crc kubenswrapper[4701]: I1121 19:17:38.584407 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/79fa8bdc-6516-4a53-8e96-17a297ac82b7-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:17:38 crc kubenswrapper[4701]: I1121 19:17:38.584464 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/79fa8bdc-6516-4a53-8e96-17a297ac82b7-config\") pod \"prometheus-metric-storage-0\" (UID: \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:17:38 crc kubenswrapper[4701]: I1121 19:17:38.584489 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/79fa8bdc-6516-4a53-8e96-17a297ac82b7-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:17:38 crc kubenswrapper[4701]: I1121 19:17:38.584693 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-7756673b-01d8-4e24-be57-9b42676a4870\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7756673b-01d8-4e24-be57-9b42676a4870\") pod \"prometheus-metric-storage-0\" (UID: \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:17:38 crc kubenswrapper[4701]: I1121 19:17:38.584906 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/79fa8bdc-6516-4a53-8e96-17a297ac82b7-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:17:38 crc kubenswrapper[4701]: I1121 19:17:38.584967 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-rc2xm\" (UniqueName: \"kubernetes.io/projected/79fa8bdc-6516-4a53-8e96-17a297ac82b7-kube-api-access-rc2xm\") pod \"prometheus-metric-storage-0\" (UID: \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:17:38 crc kubenswrapper[4701]: I1121 19:17:38.585141 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/79fa8bdc-6516-4a53-8e96-17a297ac82b7-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:17:38 crc kubenswrapper[4701]: I1121 19:17:38.585175 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/79fa8bdc-6516-4a53-8e96-17a297ac82b7-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:17:38 crc kubenswrapper[4701]: I1121 19:17:38.686355 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/79fa8bdc-6516-4a53-8e96-17a297ac82b7-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:17:38 crc kubenswrapper[4701]: I1121 19:17:38.686456 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/79fa8bdc-6516-4a53-8e96-17a297ac82b7-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:17:38 crc kubenswrapper[4701]: I1121 19:17:38.686512 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/79fa8bdc-6516-4a53-8e96-17a297ac82b7-config\") pod \"prometheus-metric-storage-0\" (UID: \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:17:38 crc kubenswrapper[4701]: I1121 19:17:38.686546 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/79fa8bdc-6516-4a53-8e96-17a297ac82b7-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:17:38 crc kubenswrapper[4701]: I1121 19:17:38.686612 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-7756673b-01d8-4e24-be57-9b42676a4870\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7756673b-01d8-4e24-be57-9b42676a4870\") pod \"prometheus-metric-storage-0\" (UID: \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:17:38 crc kubenswrapper[4701]: I1121 19:17:38.686671 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/79fa8bdc-6516-4a53-8e96-17a297ac82b7-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:17:38 crc kubenswrapper[4701]: I1121 
19:17:38.686700 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rc2xm\" (UniqueName: \"kubernetes.io/projected/79fa8bdc-6516-4a53-8e96-17a297ac82b7-kube-api-access-rc2xm\") pod \"prometheus-metric-storage-0\" (UID: \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:17:38 crc kubenswrapper[4701]: I1121 19:17:38.686757 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/79fa8bdc-6516-4a53-8e96-17a297ac82b7-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:17:38 crc kubenswrapper[4701]: I1121 19:17:38.690681 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/79fa8bdc-6516-4a53-8e96-17a297ac82b7-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:17:38 crc kubenswrapper[4701]: I1121 19:17:38.692368 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/79fa8bdc-6516-4a53-8e96-17a297ac82b7-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:17:38 crc kubenswrapper[4701]: I1121 19:17:38.693539 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/79fa8bdc-6516-4a53-8e96-17a297ac82b7-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:17:38 crc kubenswrapper[4701]: I1121 19:17:38.695686 4701 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 21 19:17:38 crc kubenswrapper[4701]: I1121 19:17:38.695733 4701 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-7756673b-01d8-4e24-be57-9b42676a4870\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7756673b-01d8-4e24-be57-9b42676a4870\") pod \"prometheus-metric-storage-0\" (UID: \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/cac188d07dbda74642d10d9af8d31d97e15d9a3dab466103b81232fdd62bf350/globalmount\"" pod="openstack/prometheus-metric-storage-0" Nov 21 19:17:38 crc kubenswrapper[4701]: I1121 19:17:38.695848 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/79fa8bdc-6516-4a53-8e96-17a297ac82b7-config\") pod \"prometheus-metric-storage-0\" (UID: \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:17:38 crc kubenswrapper[4701]: I1121 19:17:38.697596 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/79fa8bdc-6516-4a53-8e96-17a297ac82b7-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:17:38 crc kubenswrapper[4701]: I1121 19:17:38.708626 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rc2xm\" (UniqueName: \"kubernetes.io/projected/79fa8bdc-6516-4a53-8e96-17a297ac82b7-kube-api-access-rc2xm\") pod \"prometheus-metric-storage-0\" (UID: \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:17:38 crc kubenswrapper[4701]: I1121 19:17:38.715142 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/79fa8bdc-6516-4a53-8e96-17a297ac82b7-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:17:38 crc kubenswrapper[4701]: I1121 19:17:38.742267 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-7756673b-01d8-4e24-be57-9b42676a4870\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7756673b-01d8-4e24-be57-9b42676a4870\") pod \"prometheus-metric-storage-0\" (UID: \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:17:39 crc kubenswrapper[4701]: I1121 19:17:39.035232 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 21 19:17:40 crc kubenswrapper[4701]: I1121 19:17:40.553353 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 21 19:17:40 crc kubenswrapper[4701]: I1121 19:17:40.555228 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 21 19:17:40 crc kubenswrapper[4701]: I1121 19:17:40.558695 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 21 19:17:40 crc kubenswrapper[4701]: I1121 19:17:40.560547 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 21 19:17:40 crc kubenswrapper[4701]: I1121 19:17:40.560787 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-lp2p9" Nov 21 19:17:40 crc kubenswrapper[4701]: I1121 19:17:40.560980 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 21 19:17:40 crc kubenswrapper[4701]: I1121 19:17:40.569926 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 21 19:17:40 crc kubenswrapper[4701]: I1121 19:17:40.591883 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 21 19:17:40 crc kubenswrapper[4701]: I1121 19:17:40.727145 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/76796a80-e8f7-43ed-862b-011b964a31f9-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"76796a80-e8f7-43ed-862b-011b964a31f9\") " pod="openstack/ovsdbserver-nb-0" Nov 21 19:17:40 crc kubenswrapper[4701]: I1121 19:17:40.727216 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/76796a80-e8f7-43ed-862b-011b964a31f9-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"76796a80-e8f7-43ed-862b-011b964a31f9\") " pod="openstack/ovsdbserver-nb-0" Nov 21 19:17:40 crc kubenswrapper[4701]: I1121 19:17:40.727265 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/76796a80-e8f7-43ed-862b-011b964a31f9-config\") pod \"ovsdbserver-nb-0\" (UID: \"76796a80-e8f7-43ed-862b-011b964a31f9\") " pod="openstack/ovsdbserver-nb-0" Nov 21 19:17:40 crc kubenswrapper[4701]: I1121 19:17:40.727298 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9qbvb\" (UniqueName: \"kubernetes.io/projected/76796a80-e8f7-43ed-862b-011b964a31f9-kube-api-access-9qbvb\") pod \"ovsdbserver-nb-0\" (UID: \"76796a80-e8f7-43ed-862b-011b964a31f9\") " pod="openstack/ovsdbserver-nb-0" Nov 21 19:17:40 crc kubenswrapper[4701]: I1121 19:17:40.727320 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76796a80-e8f7-43ed-862b-011b964a31f9-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"76796a80-e8f7-43ed-862b-011b964a31f9\") " pod="openstack/ovsdbserver-nb-0" Nov 21 19:17:40 crc kubenswrapper[4701]: I1121 19:17:40.727359 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-nb-0\" (UID: \"76796a80-e8f7-43ed-862b-011b964a31f9\") " pod="openstack/ovsdbserver-nb-0" Nov 21 19:17:40 crc kubenswrapper[4701]: I1121 19:17:40.727379 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/76796a80-e8f7-43ed-862b-011b964a31f9-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"76796a80-e8f7-43ed-862b-011b964a31f9\") " pod="openstack/ovsdbserver-nb-0" Nov 21 19:17:40 crc kubenswrapper[4701]: I1121 19:17:40.727403 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/76796a80-e8f7-43ed-862b-011b964a31f9-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"76796a80-e8f7-43ed-862b-011b964a31f9\") " pod="openstack/ovsdbserver-nb-0" Nov 21 19:17:40 crc kubenswrapper[4701]: I1121 19:17:40.829808 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/76796a80-e8f7-43ed-862b-011b964a31f9-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"76796a80-e8f7-43ed-862b-011b964a31f9\") " pod="openstack/ovsdbserver-nb-0" Nov 21 19:17:40 crc kubenswrapper[4701]: I1121 19:17:40.830168 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/76796a80-e8f7-43ed-862b-011b964a31f9-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"76796a80-e8f7-43ed-862b-011b964a31f9\") " pod="openstack/ovsdbserver-nb-0" Nov 21 19:17:40 crc kubenswrapper[4701]: I1121 19:17:40.830300 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/76796a80-e8f7-43ed-862b-011b964a31f9-config\") pod \"ovsdbserver-nb-0\" (UID: \"76796a80-e8f7-43ed-862b-011b964a31f9\") " pod="openstack/ovsdbserver-nb-0" Nov 21 19:17:40 crc kubenswrapper[4701]: I1121 19:17:40.830390 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9qbvb\" (UniqueName: \"kubernetes.io/projected/76796a80-e8f7-43ed-862b-011b964a31f9-kube-api-access-9qbvb\") pod \"ovsdbserver-nb-0\" (UID: \"76796a80-e8f7-43ed-862b-011b964a31f9\") " pod="openstack/ovsdbserver-nb-0" Nov 21 19:17:40 crc kubenswrapper[4701]: I1121 19:17:40.830483 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76796a80-e8f7-43ed-862b-011b964a31f9-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"76796a80-e8f7-43ed-862b-011b964a31f9\") " pod="openstack/ovsdbserver-nb-0" Nov 21 19:17:40 crc kubenswrapper[4701]: I1121 19:17:40.830577 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-nb-0\" (UID: \"76796a80-e8f7-43ed-862b-011b964a31f9\") " pod="openstack/ovsdbserver-nb-0" Nov 21 19:17:40 crc kubenswrapper[4701]: I1121 19:17:40.830658 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/76796a80-e8f7-43ed-862b-011b964a31f9-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"76796a80-e8f7-43ed-862b-011b964a31f9\") " pod="openstack/ovsdbserver-nb-0" Nov 21 19:17:40 crc kubenswrapper[4701]: I1121 19:17:40.830739 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/76796a80-e8f7-43ed-862b-011b964a31f9-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"76796a80-e8f7-43ed-862b-011b964a31f9\") " pod="openstack/ovsdbserver-nb-0" Nov 21 19:17:40 crc kubenswrapper[4701]: I1121 
19:17:40.831040 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/76796a80-e8f7-43ed-862b-011b964a31f9-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"76796a80-e8f7-43ed-862b-011b964a31f9\") " pod="openstack/ovsdbserver-nb-0" Nov 21 19:17:40 crc kubenswrapper[4701]: I1121 19:17:40.831369 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/76796a80-e8f7-43ed-862b-011b964a31f9-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"76796a80-e8f7-43ed-862b-011b964a31f9\") " pod="openstack/ovsdbserver-nb-0" Nov 21 19:17:40 crc kubenswrapper[4701]: I1121 19:17:40.832035 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/76796a80-e8f7-43ed-862b-011b964a31f9-config\") pod \"ovsdbserver-nb-0\" (UID: \"76796a80-e8f7-43ed-862b-011b964a31f9\") " pod="openstack/ovsdbserver-nb-0" Nov 21 19:17:40 crc kubenswrapper[4701]: I1121 19:17:40.832297 4701 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-nb-0\" (UID: \"76796a80-e8f7-43ed-862b-011b964a31f9\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/ovsdbserver-nb-0" Nov 21 19:17:40 crc kubenswrapper[4701]: I1121 19:17:40.839925 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/76796a80-e8f7-43ed-862b-011b964a31f9-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"76796a80-e8f7-43ed-862b-011b964a31f9\") " pod="openstack/ovsdbserver-nb-0" Nov 21 19:17:40 crc kubenswrapper[4701]: I1121 19:17:40.840001 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/76796a80-e8f7-43ed-862b-011b964a31f9-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"76796a80-e8f7-43ed-862b-011b964a31f9\") " pod="openstack/ovsdbserver-nb-0" Nov 21 19:17:40 crc kubenswrapper[4701]: I1121 19:17:40.856988 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76796a80-e8f7-43ed-862b-011b964a31f9-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"76796a80-e8f7-43ed-862b-011b964a31f9\") " pod="openstack/ovsdbserver-nb-0" Nov 21 19:17:40 crc kubenswrapper[4701]: I1121 19:17:40.870113 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-nb-0\" (UID: \"76796a80-e8f7-43ed-862b-011b964a31f9\") " pod="openstack/ovsdbserver-nb-0" Nov 21 19:17:40 crc kubenswrapper[4701]: I1121 19:17:40.871390 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9qbvb\" (UniqueName: \"kubernetes.io/projected/76796a80-e8f7-43ed-862b-011b964a31f9-kube-api-access-9qbvb\") pod \"ovsdbserver-nb-0\" (UID: \"76796a80-e8f7-43ed-862b-011b964a31f9\") " pod="openstack/ovsdbserver-nb-0" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.173407 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.532048 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-49p6k"] Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.533550 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-49p6k" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.535787 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-p55j2" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.539565 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.539744 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.622590 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-49p6k"] Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.681385 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-vqwr8"] Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.683715 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-vqwr8" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.697733 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-vqwr8"] Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.713116 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d6bce0ec-3045-405e-914b-f466321dc7ea-var-run-ovn\") pod \"ovn-controller-49p6k\" (UID: \"d6bce0ec-3045-405e-914b-f466321dc7ea\") " pod="openstack/ovn-controller-49p6k" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.713189 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6bce0ec-3045-405e-914b-f466321dc7ea-combined-ca-bundle\") pod \"ovn-controller-49p6k\" (UID: \"d6bce0ec-3045-405e-914b-f466321dc7ea\") " pod="openstack/ovn-controller-49p6k" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.713262 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8dvh5\" (UniqueName: \"kubernetes.io/projected/d6bce0ec-3045-405e-914b-f466321dc7ea-kube-api-access-8dvh5\") pod \"ovn-controller-49p6k\" (UID: \"d6bce0ec-3045-405e-914b-f466321dc7ea\") " pod="openstack/ovn-controller-49p6k" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.713316 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6bce0ec-3045-405e-914b-f466321dc7ea-ovn-controller-tls-certs\") pod \"ovn-controller-49p6k\" (UID: \"d6bce0ec-3045-405e-914b-f466321dc7ea\") " pod="openstack/ovn-controller-49p6k" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.713381 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d6bce0ec-3045-405e-914b-f466321dc7ea-var-run\") pod \"ovn-controller-49p6k\" (UID: \"d6bce0ec-3045-405e-914b-f466321dc7ea\") " pod="openstack/ovn-controller-49p6k" Nov 21 
19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.713403 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d6bce0ec-3045-405e-914b-f466321dc7ea-scripts\") pod \"ovn-controller-49p6k\" (UID: \"d6bce0ec-3045-405e-914b-f466321dc7ea\") " pod="openstack/ovn-controller-49p6k" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.713423 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d6bce0ec-3045-405e-914b-f466321dc7ea-var-log-ovn\") pod \"ovn-controller-49p6k\" (UID: \"d6bce0ec-3045-405e-914b-f466321dc7ea\") " pod="openstack/ovn-controller-49p6k" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.814528 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d6bce0ec-3045-405e-914b-f466321dc7ea-scripts\") pod \"ovn-controller-49p6k\" (UID: \"d6bce0ec-3045-405e-914b-f466321dc7ea\") " pod="openstack/ovn-controller-49p6k" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.814598 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d6bce0ec-3045-405e-914b-f466321dc7ea-var-log-ovn\") pod \"ovn-controller-49p6k\" (UID: \"d6bce0ec-3045-405e-914b-f466321dc7ea\") " pod="openstack/ovn-controller-49p6k" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.814676 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d6bce0ec-3045-405e-914b-f466321dc7ea-var-run-ovn\") pod \"ovn-controller-49p6k\" (UID: \"d6bce0ec-3045-405e-914b-f466321dc7ea\") " pod="openstack/ovn-controller-49p6k" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.814706 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c1552bca-042c-4d9e-ac6f-8c8f762ac494-scripts\") pod \"ovn-controller-ovs-vqwr8\" (UID: \"c1552bca-042c-4d9e-ac6f-8c8f762ac494\") " pod="openstack/ovn-controller-ovs-vqwr8" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.814732 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/c1552bca-042c-4d9e-ac6f-8c8f762ac494-etc-ovs\") pod \"ovn-controller-ovs-vqwr8\" (UID: \"c1552bca-042c-4d9e-ac6f-8c8f762ac494\") " pod="openstack/ovn-controller-ovs-vqwr8" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.814759 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c1552bca-042c-4d9e-ac6f-8c8f762ac494-var-run\") pod \"ovn-controller-ovs-vqwr8\" (UID: \"c1552bca-042c-4d9e-ac6f-8c8f762ac494\") " pod="openstack/ovn-controller-ovs-vqwr8" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.814775 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6bce0ec-3045-405e-914b-f466321dc7ea-combined-ca-bundle\") pod \"ovn-controller-49p6k\" (UID: \"d6bce0ec-3045-405e-914b-f466321dc7ea\") " pod="openstack/ovn-controller-49p6k" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.814793 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-8dvh5\" (UniqueName: \"kubernetes.io/projected/d6bce0ec-3045-405e-914b-f466321dc7ea-kube-api-access-8dvh5\") pod \"ovn-controller-49p6k\" (UID: \"d6bce0ec-3045-405e-914b-f466321dc7ea\") " pod="openstack/ovn-controller-49p6k" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.814814 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/c1552bca-042c-4d9e-ac6f-8c8f762ac494-var-lib\") pod \"ovn-controller-ovs-vqwr8\" (UID: \"c1552bca-042c-4d9e-ac6f-8c8f762ac494\") " pod="openstack/ovn-controller-ovs-vqwr8" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.814841 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6bce0ec-3045-405e-914b-f466321dc7ea-ovn-controller-tls-certs\") pod \"ovn-controller-49p6k\" (UID: \"d6bce0ec-3045-405e-914b-f466321dc7ea\") " pod="openstack/ovn-controller-49p6k" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.814863 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8bjw9\" (UniqueName: \"kubernetes.io/projected/c1552bca-042c-4d9e-ac6f-8c8f762ac494-kube-api-access-8bjw9\") pod \"ovn-controller-ovs-vqwr8\" (UID: \"c1552bca-042c-4d9e-ac6f-8c8f762ac494\") " pod="openstack/ovn-controller-ovs-vqwr8" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.814901 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/c1552bca-042c-4d9e-ac6f-8c8f762ac494-var-log\") pod \"ovn-controller-ovs-vqwr8\" (UID: \"c1552bca-042c-4d9e-ac6f-8c8f762ac494\") " pod="openstack/ovn-controller-ovs-vqwr8" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.814923 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d6bce0ec-3045-405e-914b-f466321dc7ea-var-run\") pod \"ovn-controller-49p6k\" (UID: \"d6bce0ec-3045-405e-914b-f466321dc7ea\") " pod="openstack/ovn-controller-49p6k" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.815551 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d6bce0ec-3045-405e-914b-f466321dc7ea-var-log-ovn\") pod \"ovn-controller-49p6k\" (UID: \"d6bce0ec-3045-405e-914b-f466321dc7ea\") " pod="openstack/ovn-controller-49p6k" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.815698 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d6bce0ec-3045-405e-914b-f466321dc7ea-var-run\") pod \"ovn-controller-49p6k\" (UID: \"d6bce0ec-3045-405e-914b-f466321dc7ea\") " pod="openstack/ovn-controller-49p6k" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.815724 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d6bce0ec-3045-405e-914b-f466321dc7ea-var-run-ovn\") pod \"ovn-controller-49p6k\" (UID: \"d6bce0ec-3045-405e-914b-f466321dc7ea\") " pod="openstack/ovn-controller-49p6k" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.817596 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d6bce0ec-3045-405e-914b-f466321dc7ea-scripts\") pod \"ovn-controller-49p6k\" (UID: 
\"d6bce0ec-3045-405e-914b-f466321dc7ea\") " pod="openstack/ovn-controller-49p6k" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.819591 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6bce0ec-3045-405e-914b-f466321dc7ea-ovn-controller-tls-certs\") pod \"ovn-controller-49p6k\" (UID: \"d6bce0ec-3045-405e-914b-f466321dc7ea\") " pod="openstack/ovn-controller-49p6k" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.820323 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6bce0ec-3045-405e-914b-f466321dc7ea-combined-ca-bundle\") pod \"ovn-controller-49p6k\" (UID: \"d6bce0ec-3045-405e-914b-f466321dc7ea\") " pod="openstack/ovn-controller-49p6k" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.835875 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8dvh5\" (UniqueName: \"kubernetes.io/projected/d6bce0ec-3045-405e-914b-f466321dc7ea-kube-api-access-8dvh5\") pod \"ovn-controller-49p6k\" (UID: \"d6bce0ec-3045-405e-914b-f466321dc7ea\") " pod="openstack/ovn-controller-49p6k" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.916626 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c1552bca-042c-4d9e-ac6f-8c8f762ac494-scripts\") pod \"ovn-controller-ovs-vqwr8\" (UID: \"c1552bca-042c-4d9e-ac6f-8c8f762ac494\") " pod="openstack/ovn-controller-ovs-vqwr8" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.916693 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/c1552bca-042c-4d9e-ac6f-8c8f762ac494-etc-ovs\") pod \"ovn-controller-ovs-vqwr8\" (UID: \"c1552bca-042c-4d9e-ac6f-8c8f762ac494\") " pod="openstack/ovn-controller-ovs-vqwr8" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.916738 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c1552bca-042c-4d9e-ac6f-8c8f762ac494-var-run\") pod \"ovn-controller-ovs-vqwr8\" (UID: \"c1552bca-042c-4d9e-ac6f-8c8f762ac494\") " pod="openstack/ovn-controller-ovs-vqwr8" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.916765 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/c1552bca-042c-4d9e-ac6f-8c8f762ac494-var-lib\") pod \"ovn-controller-ovs-vqwr8\" (UID: \"c1552bca-042c-4d9e-ac6f-8c8f762ac494\") " pod="openstack/ovn-controller-ovs-vqwr8" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.916803 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8bjw9\" (UniqueName: \"kubernetes.io/projected/c1552bca-042c-4d9e-ac6f-8c8f762ac494-kube-api-access-8bjw9\") pod \"ovn-controller-ovs-vqwr8\" (UID: \"c1552bca-042c-4d9e-ac6f-8c8f762ac494\") " pod="openstack/ovn-controller-ovs-vqwr8" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.916844 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/c1552bca-042c-4d9e-ac6f-8c8f762ac494-var-log\") pod \"ovn-controller-ovs-vqwr8\" (UID: \"c1552bca-042c-4d9e-ac6f-8c8f762ac494\") " pod="openstack/ovn-controller-ovs-vqwr8" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.917174 4701 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c1552bca-042c-4d9e-ac6f-8c8f762ac494-var-run\") pod \"ovn-controller-ovs-vqwr8\" (UID: \"c1552bca-042c-4d9e-ac6f-8c8f762ac494\") " pod="openstack/ovn-controller-ovs-vqwr8" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.917235 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/c1552bca-042c-4d9e-ac6f-8c8f762ac494-etc-ovs\") pod \"ovn-controller-ovs-vqwr8\" (UID: \"c1552bca-042c-4d9e-ac6f-8c8f762ac494\") " pod="openstack/ovn-controller-ovs-vqwr8" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.917334 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/c1552bca-042c-4d9e-ac6f-8c8f762ac494-var-log\") pod \"ovn-controller-ovs-vqwr8\" (UID: \"c1552bca-042c-4d9e-ac6f-8c8f762ac494\") " pod="openstack/ovn-controller-ovs-vqwr8" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.917351 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/c1552bca-042c-4d9e-ac6f-8c8f762ac494-var-lib\") pod \"ovn-controller-ovs-vqwr8\" (UID: \"c1552bca-042c-4d9e-ac6f-8c8f762ac494\") " pod="openstack/ovn-controller-ovs-vqwr8" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.917833 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-49p6k" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.919515 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c1552bca-042c-4d9e-ac6f-8c8f762ac494-scripts\") pod \"ovn-controller-ovs-vqwr8\" (UID: \"c1552bca-042c-4d9e-ac6f-8c8f762ac494\") " pod="openstack/ovn-controller-ovs-vqwr8" Nov 21 19:17:41 crc kubenswrapper[4701]: I1121 19:17:41.939154 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8bjw9\" (UniqueName: \"kubernetes.io/projected/c1552bca-042c-4d9e-ac6f-8c8f762ac494-kube-api-access-8bjw9\") pod \"ovn-controller-ovs-vqwr8\" (UID: \"c1552bca-042c-4d9e-ac6f-8c8f762ac494\") " pod="openstack/ovn-controller-ovs-vqwr8" Nov 21 19:17:42 crc kubenswrapper[4701]: I1121 19:17:42.005633 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-vqwr8" Nov 21 19:17:45 crc kubenswrapper[4701]: I1121 19:17:45.465446 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 21 19:17:45 crc kubenswrapper[4701]: I1121 19:17:45.470535 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 21 19:17:45 crc kubenswrapper[4701]: I1121 19:17:45.486265 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 21 19:17:45 crc kubenswrapper[4701]: I1121 19:17:45.486487 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-w8spj" Nov 21 19:17:45 crc kubenswrapper[4701]: I1121 19:17:45.486791 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 21 19:17:45 crc kubenswrapper[4701]: I1121 19:17:45.487159 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 21 19:17:45 crc kubenswrapper[4701]: I1121 19:17:45.493152 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 21 19:17:45 crc kubenswrapper[4701]: I1121 19:17:45.589239 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/fae8c560-c6a6-453e-8c64-9dca8183e5c0-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"fae8c560-c6a6-453e-8c64-9dca8183e5c0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 19:17:45 crc kubenswrapper[4701]: I1121 19:17:45.589345 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/fae8c560-c6a6-453e-8c64-9dca8183e5c0-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"fae8c560-c6a6-453e-8c64-9dca8183e5c0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 19:17:45 crc kubenswrapper[4701]: I1121 19:17:45.589388 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/fae8c560-c6a6-453e-8c64-9dca8183e5c0-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"fae8c560-c6a6-453e-8c64-9dca8183e5c0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 19:17:45 crc kubenswrapper[4701]: I1121 19:17:45.589432 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hfjkl\" (UniqueName: \"kubernetes.io/projected/fae8c560-c6a6-453e-8c64-9dca8183e5c0-kube-api-access-hfjkl\") pod \"ovsdbserver-sb-0\" (UID: \"fae8c560-c6a6-453e-8c64-9dca8183e5c0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 19:17:45 crc kubenswrapper[4701]: I1121 19:17:45.589517 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fae8c560-c6a6-453e-8c64-9dca8183e5c0-config\") pod \"ovsdbserver-sb-0\" (UID: \"fae8c560-c6a6-453e-8c64-9dca8183e5c0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 19:17:45 crc kubenswrapper[4701]: I1121 19:17:45.589600 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"ovsdbserver-sb-0\" (UID: \"fae8c560-c6a6-453e-8c64-9dca8183e5c0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 19:17:45 crc kubenswrapper[4701]: I1121 19:17:45.589689 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fae8c560-c6a6-453e-8c64-9dca8183e5c0-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"fae8c560-c6a6-453e-8c64-9dca8183e5c0\") " 
pod="openstack/ovsdbserver-sb-0" Nov 21 19:17:45 crc kubenswrapper[4701]: I1121 19:17:45.589720 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fae8c560-c6a6-453e-8c64-9dca8183e5c0-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"fae8c560-c6a6-453e-8c64-9dca8183e5c0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 19:17:45 crc kubenswrapper[4701]: I1121 19:17:45.691248 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/fae8c560-c6a6-453e-8c64-9dca8183e5c0-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"fae8c560-c6a6-453e-8c64-9dca8183e5c0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 19:17:45 crc kubenswrapper[4701]: I1121 19:17:45.691329 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/fae8c560-c6a6-453e-8c64-9dca8183e5c0-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"fae8c560-c6a6-453e-8c64-9dca8183e5c0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 19:17:45 crc kubenswrapper[4701]: I1121 19:17:45.691353 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/fae8c560-c6a6-453e-8c64-9dca8183e5c0-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"fae8c560-c6a6-453e-8c64-9dca8183e5c0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 19:17:45 crc kubenswrapper[4701]: I1121 19:17:45.691395 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hfjkl\" (UniqueName: \"kubernetes.io/projected/fae8c560-c6a6-453e-8c64-9dca8183e5c0-kube-api-access-hfjkl\") pod \"ovsdbserver-sb-0\" (UID: \"fae8c560-c6a6-453e-8c64-9dca8183e5c0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 19:17:45 crc kubenswrapper[4701]: I1121 19:17:45.691429 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fae8c560-c6a6-453e-8c64-9dca8183e5c0-config\") pod \"ovsdbserver-sb-0\" (UID: \"fae8c560-c6a6-453e-8c64-9dca8183e5c0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 19:17:45 crc kubenswrapper[4701]: I1121 19:17:45.691495 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"ovsdbserver-sb-0\" (UID: \"fae8c560-c6a6-453e-8c64-9dca8183e5c0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 19:17:45 crc kubenswrapper[4701]: I1121 19:17:45.691559 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fae8c560-c6a6-453e-8c64-9dca8183e5c0-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"fae8c560-c6a6-453e-8c64-9dca8183e5c0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 19:17:45 crc kubenswrapper[4701]: I1121 19:17:45.691578 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fae8c560-c6a6-453e-8c64-9dca8183e5c0-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"fae8c560-c6a6-453e-8c64-9dca8183e5c0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 19:17:45 crc kubenswrapper[4701]: I1121 19:17:45.693632 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: 
\"kubernetes.io/empty-dir/fae8c560-c6a6-453e-8c64-9dca8183e5c0-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"fae8c560-c6a6-453e-8c64-9dca8183e5c0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 19:17:45 crc kubenswrapper[4701]: I1121 19:17:45.693674 4701 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"ovsdbserver-sb-0\" (UID: \"fae8c560-c6a6-453e-8c64-9dca8183e5c0\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/ovsdbserver-sb-0" Nov 21 19:17:45 crc kubenswrapper[4701]: I1121 19:17:45.694848 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fae8c560-c6a6-453e-8c64-9dca8183e5c0-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"fae8c560-c6a6-453e-8c64-9dca8183e5c0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 19:17:45 crc kubenswrapper[4701]: I1121 19:17:45.695910 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fae8c560-c6a6-453e-8c64-9dca8183e5c0-config\") pod \"ovsdbserver-sb-0\" (UID: \"fae8c560-c6a6-453e-8c64-9dca8183e5c0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 19:17:45 crc kubenswrapper[4701]: I1121 19:17:45.701855 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/fae8c560-c6a6-453e-8c64-9dca8183e5c0-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"fae8c560-c6a6-453e-8c64-9dca8183e5c0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 19:17:45 crc kubenswrapper[4701]: I1121 19:17:45.709349 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/fae8c560-c6a6-453e-8c64-9dca8183e5c0-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"fae8c560-c6a6-453e-8c64-9dca8183e5c0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 19:17:45 crc kubenswrapper[4701]: I1121 19:17:45.719660 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fae8c560-c6a6-453e-8c64-9dca8183e5c0-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"fae8c560-c6a6-453e-8c64-9dca8183e5c0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 19:17:45 crc kubenswrapper[4701]: I1121 19:17:45.724599 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hfjkl\" (UniqueName: \"kubernetes.io/projected/fae8c560-c6a6-453e-8c64-9dca8183e5c0-kube-api-access-hfjkl\") pod \"ovsdbserver-sb-0\" (UID: \"fae8c560-c6a6-453e-8c64-9dca8183e5c0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 19:17:45 crc kubenswrapper[4701]: I1121 19:17:45.756026 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"ovsdbserver-sb-0\" (UID: \"fae8c560-c6a6-453e-8c64-9dca8183e5c0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 19:17:45 crc kubenswrapper[4701]: I1121 19:17:45.807963 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 21 19:17:47 crc kubenswrapper[4701]: I1121 19:17:47.989900 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 21 19:17:48 crc kubenswrapper[4701]: E1121 19:17:48.326195 4701 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.164:5001/podified-master-centos10/openstack-neutron-server:watcher_latest" Nov 21 19:17:48 crc kubenswrapper[4701]: E1121 19:17:48.326307 4701 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.164:5001/podified-master-centos10/openstack-neutron-server:watcher_latest" Nov 21 19:17:48 crc kubenswrapper[4701]: E1121 19:17:48.326566 4701 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:38.102.83.164:5001/podified-master-centos10/openstack-neutron-server:watcher_latest,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-527w4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-5bd759bbbf-h4thw_openstack(272c7d8c-0292-4222-882d-c0c674c604b8): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 19:17:48 crc kubenswrapper[4701]: E1121 19:17:48.327795 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-5bd759bbbf-h4thw" podUID="272c7d8c-0292-4222-882d-c0c674c604b8" Nov 21 19:17:48 crc kubenswrapper[4701]: E1121 19:17:48.384952 4701 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying 
config: context canceled" image="38.102.83.164:5001/podified-master-centos10/openstack-neutron-server:watcher_latest" Nov 21 19:17:48 crc kubenswrapper[4701]: E1121 19:17:48.385038 4701 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.164:5001/podified-master-centos10/openstack-neutron-server:watcher_latest" Nov 21 19:17:48 crc kubenswrapper[4701]: E1121 19:17:48.385360 4701 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:38.102.83.164:5001/podified-master-centos10/openstack-neutron-server:watcher_latest,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bhvr9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-866784dbf-w5cpz_openstack(e387a880-c597-4dfc-9c86-843b719a31b4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 19:17:48 crc kubenswrapper[4701]: E1121 19:17:48.386564 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-866784dbf-w5cpz" podUID="e387a880-c597-4dfc-9c86-843b719a31b4" Nov 21 19:17:48 crc kubenswrapper[4701]: I1121 19:17:48.613942 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 19:17:48 crc 
kubenswrapper[4701]: I1121 19:17:48.614727 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 19:17:48 crc kubenswrapper[4701]: I1121 19:17:48.614811 4701 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" Nov 21 19:17:48 crc kubenswrapper[4701]: I1121 19:17:48.621022 4701 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"76a0edd10d4f17051fb2f677c020a3884e840a31cfe72eb7d10bdd5a1c9d63b1"} pod="openshift-machine-config-operator/machine-config-daemon-tbszf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 19:17:48 crc kubenswrapper[4701]: I1121 19:17:48.621107 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" containerID="cri-o://76a0edd10d4f17051fb2f677c020a3884e840a31cfe72eb7d10bdd5a1c9d63b1" gracePeriod=600 Nov 21 19:17:48 crc kubenswrapper[4701]: I1121 19:17:48.756452 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"2d891f55-8791-487d-b8f9-b4183da3e720","Type":"ContainerStarted","Data":"b0e1452416ff4638ab3c041479a1cb3ecf8209a2cb6120258fe312de49213d59"} Nov 21 19:17:48 crc kubenswrapper[4701]: I1121 19:17:48.901738 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 21 19:17:48 crc kubenswrapper[4701]: W1121 19:17:48.916284 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb6432247_ed58_4dce_98d4_4267d0122151.slice/crio-d2805499fb0741e25bc456502d2bcf67fc1343d165c1d8d6eaa3dd02343c12a8 WatchSource:0}: Error finding container d2805499fb0741e25bc456502d2bcf67fc1343d165c1d8d6eaa3dd02343c12a8: Status 404 returned error can't find the container with id d2805499fb0741e25bc456502d2bcf67fc1343d165c1d8d6eaa3dd02343c12a8 Nov 21 19:17:49 crc kubenswrapper[4701]: I1121 19:17:49.401373 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d7d677589-c9bgz"] Nov 21 19:17:49 crc kubenswrapper[4701]: I1121 19:17:49.421704 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 21 19:17:49 crc kubenswrapper[4701]: I1121 19:17:49.437421 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-notifications-server-0"] Nov 21 19:17:49 crc kubenswrapper[4701]: I1121 19:17:49.465609 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56d4f587b9-g9fx9"] Nov 21 19:17:49 crc kubenswrapper[4701]: I1121 19:17:49.482282 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-49p6k"] Nov 21 19:17:49 crc kubenswrapper[4701]: I1121 19:17:49.511346 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 21 19:17:49 crc kubenswrapper[4701]: I1121 19:17:49.717395 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 21 19:17:49 crc 
kubenswrapper[4701]: I1121 19:17:49.775921 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"b6432247-ed58-4dce-98d4-4267d0122151","Type":"ContainerStarted","Data":"d2805499fb0741e25bc456502d2bcf67fc1343d165c1d8d6eaa3dd02343c12a8"} Nov 21 19:17:49 crc kubenswrapper[4701]: I1121 19:17:49.780914 4701 generic.go:334] "Generic (PLEG): container finished" podID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerID="76a0edd10d4f17051fb2f677c020a3884e840a31cfe72eb7d10bdd5a1c9d63b1" exitCode=0 Nov 21 19:17:49 crc kubenswrapper[4701]: I1121 19:17:49.780968 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerDied","Data":"76a0edd10d4f17051fb2f677c020a3884e840a31cfe72eb7d10bdd5a1c9d63b1"} Nov 21 19:17:49 crc kubenswrapper[4701]: I1121 19:17:49.780987 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerStarted","Data":"0522a5d31d2783b232fd70ced5acfdf22c3becfa61b128f650faf72c65913cd6"} Nov 21 19:17:49 crc kubenswrapper[4701]: I1121 19:17:49.781023 4701 scope.go:117] "RemoveContainer" containerID="c8758f6e0ff69b0e680f67ce66823ba447806821fa55aca9dc22f0075d6645fd" Nov 21 19:17:49 crc kubenswrapper[4701]: I1121 19:17:49.824926 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 21 19:17:49 crc kubenswrapper[4701]: I1121 19:17:49.923574 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6684cc9dc7-878nj"] Nov 21 19:17:49 crc kubenswrapper[4701]: I1121 19:17:49.942081 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 21 19:17:49 crc kubenswrapper[4701]: I1121 19:17:49.949979 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 21 19:17:50 crc kubenswrapper[4701]: I1121 19:17:50.345141 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-vqwr8"] Nov 21 19:17:50 crc kubenswrapper[4701]: W1121 19:17:50.469573 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod34a50e7c_5ef4_4882_9ecb_0b744f6d1ab3.slice/crio-2262d54691fb30547034c388144ba5bd93002a261e43134133f5c3f14bb77fe4 WatchSource:0}: Error finding container 2262d54691fb30547034c388144ba5bd93002a261e43134133f5c3f14bb77fe4: Status 404 returned error can't find the container with id 2262d54691fb30547034c388144ba5bd93002a261e43134133f5c3f14bb77fe4 Nov 21 19:17:50 crc kubenswrapper[4701]: W1121 19:17:50.474270 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod79fa8bdc_6516_4a53_8e96_17a297ac82b7.slice/crio-2dee2e60062e975f329ac77626d55748ee4ed70fcace3e07fcd9ea83a8eeb5d7 WatchSource:0}: Error finding container 2dee2e60062e975f329ac77626d55748ee4ed70fcace3e07fcd9ea83a8eeb5d7: Status 404 returned error can't find the container with id 2dee2e60062e975f329ac77626d55748ee4ed70fcace3e07fcd9ea83a8eeb5d7 Nov 21 19:17:50 crc kubenswrapper[4701]: W1121 19:17:50.478060 4701 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0f23fd8b_e3f0_4d0c_97fb_e0c0a5a37c02.slice/crio-c9446e53b3e237c3c4a89e71c0867ff2fcbabe4608d2230c6fa8b41e098bc15a WatchSource:0}: Error finding container c9446e53b3e237c3c4a89e71c0867ff2fcbabe4608d2230c6fa8b41e098bc15a: Status 404 returned error can't find the container with id c9446e53b3e237c3c4a89e71c0867ff2fcbabe4608d2230c6fa8b41e098bc15a Nov 21 19:17:50 crc kubenswrapper[4701]: W1121 19:17:50.513468 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc1552bca_042c_4d9e_ac6f_8c8f762ac494.slice/crio-fbd86ac9936d3352030e6e5ba4b17702b6d72a6ae277fc5625251260455448e1 WatchSource:0}: Error finding container fbd86ac9936d3352030e6e5ba4b17702b6d72a6ae277fc5625251260455448e1: Status 404 returned error can't find the container with id fbd86ac9936d3352030e6e5ba4b17702b6d72a6ae277fc5625251260455448e1 Nov 21 19:17:50 crc kubenswrapper[4701]: W1121 19:17:50.515400 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod430bf048_144e_44fa_970e_10b09547c277.slice/crio-7d97f43f2c36442b6ff4718dd1ae513053930f4cea0e478d4e04face3af71d83 WatchSource:0}: Error finding container 7d97f43f2c36442b6ff4718dd1ae513053930f4cea0e478d4e04face3af71d83: Status 404 returned error can't find the container with id 7d97f43f2c36442b6ff4718dd1ae513053930f4cea0e478d4e04face3af71d83 Nov 21 19:17:50 crc kubenswrapper[4701]: W1121 19:17:50.516999 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod600d7142_cf1a_4e30_968d_5b75a572085d.slice/crio-335f58aa33577b685678dab6d24654a33e4f7acc29d44df0bce19f421dffa906 WatchSource:0}: Error finding container 335f58aa33577b685678dab6d24654a33e4f7acc29d44df0bce19f421dffa906: Status 404 returned error can't find the container with id 335f58aa33577b685678dab6d24654a33e4f7acc29d44df0bce19f421dffa906 Nov 21 19:17:50 crc kubenswrapper[4701]: W1121 19:17:50.518710 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfae8c560_c6a6_453e_8c64_9dca8183e5c0.slice/crio-e4718f9e55335df58ac34a2713bf3e67a2ab122adbd010dfee627c72f9396ea6 WatchSource:0}: Error finding container e4718f9e55335df58ac34a2713bf3e67a2ab122adbd010dfee627c72f9396ea6: Status 404 returned error can't find the container with id e4718f9e55335df58ac34a2713bf3e67a2ab122adbd010dfee627c72f9396ea6 Nov 21 19:17:50 crc kubenswrapper[4701]: W1121 19:17:50.523811 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd6bce0ec_3045_405e_914b_f466321dc7ea.slice/crio-df274408c0896cdee3ef53f3fc979b7bed7abd7fb861ff09021dc1022db21cd4 WatchSource:0}: Error finding container df274408c0896cdee3ef53f3fc979b7bed7abd7fb861ff09021dc1022db21cd4: Status 404 returned error can't find the container with id df274408c0896cdee3ef53f3fc979b7bed7abd7fb861ff09021dc1022db21cd4 Nov 21 19:17:50 crc kubenswrapper[4701]: E1121 19:17:50.567226 4701 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-state-metrics,Image:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,Command:[],Args:[--resources=pods 
--namespaces=openstack],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:8080,Protocol:TCP,HostIP:,},ContainerPort{Name:telemetry,HostPort:0,ContainerPort:8081,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jmr4d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/livez,Port:{0 8080 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod kube-state-metrics-0_openstack(600d7142-cf1a-4e30-968d-5b75a572085d): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 21 19:17:50 crc kubenswrapper[4701]: E1121 19:17:50.567183 4701 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ovn-controller,Image:38.102.83.164:5001/podified-master-centos10/openstack-ovn-controller:watcher_latest,Command:[ovn-controller --pidfile unix:/run/openvswitch/db.sock --certificate=/etc/pki/tls/certs/ovndb.crt --private-key=/etc/pki/tls/private/ovndb.key 
--ca-cert=/etc/pki/tls/certs/ovndbca.crt],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5c9h575h77h68h5d8hd7h548hcdh549h644h5fch5fdhcch649hcch55fh55h99h668h5bbh676h5d7h5c8h65ch696hb5h5c5h98h68dh5fbh5cbhf7q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:var-run,ReadOnly:false,MountPath:/var/run/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-run-ovn,ReadOnly:false,MountPath:/var/run/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-log-ovn,ReadOnly:false,MountPath:/var/log/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-controller-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndb.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-controller-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovndb.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-controller-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8dvh5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/ovn_controller_liveness.sh],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:30,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/ovn_controller_readiness.sh],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:30,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/usr/share/ovn/scripts/ovn-ctl stop_controller],},HTTPGet:nil,TCPSocket:nil,Sleep:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[NET_ADMIN SYS_ADMIN SYS_NICE],Drop:[],},Privileged:*true,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-controller-49p6k_openstack(d6bce0ec-3045-405e-914b-f466321dc7ea): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 21 19:17:50 crc kubenswrapper[4701]: E1121 19:17:50.568748 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" 
with ErrImagePull: \"pull QPS exceeded\"" pod="openstack/kube-state-metrics-0" podUID="600d7142-cf1a-4e30-968d-5b75a572085d" Nov 21 19:17:50 crc kubenswrapper[4701]: E1121 19:17:50.568812 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovn-controller\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack/ovn-controller-49p6k" podUID="d6bce0ec-3045-405e-914b-f466321dc7ea" Nov 21 19:17:50 crc kubenswrapper[4701]: I1121 19:17:50.660808 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bd759bbbf-h4thw" Nov 21 19:17:50 crc kubenswrapper[4701]: I1121 19:17:50.661006 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-866784dbf-w5cpz" Nov 21 19:17:50 crc kubenswrapper[4701]: I1121 19:17:50.799126 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"600d7142-cf1a-4e30-968d-5b75a572085d","Type":"ContainerStarted","Data":"335f58aa33577b685678dab6d24654a33e4f7acc29d44df0bce19f421dffa906"} Nov 21 19:17:50 crc kubenswrapper[4701]: E1121 19:17:50.804081 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0\\\"\"" pod="openstack/kube-state-metrics-0" podUID="600d7142-cf1a-4e30-968d-5b75a572085d" Nov 21 19:17:50 crc kubenswrapper[4701]: I1121 19:17:50.811900 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bd759bbbf-h4thw" event={"ID":"272c7d8c-0292-4222-882d-c0c674c604b8","Type":"ContainerDied","Data":"fea280aff9737045f8e9fedca8823c94cb23674e23a60b939a255cace12c9ca9"} Nov 21 19:17:50 crc kubenswrapper[4701]: I1121 19:17:50.811982 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5bd759bbbf-h4thw" Nov 21 19:17:50 crc kubenswrapper[4701]: I1121 19:17:50.824281 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02","Type":"ContainerStarted","Data":"c9446e53b3e237c3c4a89e71c0867ff2fcbabe4608d2230c6fa8b41e098bc15a"} Nov 21 19:17:50 crc kubenswrapper[4701]: I1121 19:17:50.828437 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"79fa8bdc-6516-4a53-8e96-17a297ac82b7","Type":"ContainerStarted","Data":"2dee2e60062e975f329ac77626d55748ee4ed70fcace3e07fcd9ea83a8eeb5d7"} Nov 21 19:17:50 crc kubenswrapper[4701]: I1121 19:17:50.833169 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6684cc9dc7-878nj" event={"ID":"a6170b01-4aa3-4d90-a317-764721c7e08c","Type":"ContainerStarted","Data":"99c24c2c1b7ad7d72bd16a8ba6c9aabcefbdb6ad68c1282484da485418a58641"} Nov 21 19:17:50 crc kubenswrapper[4701]: I1121 19:17:50.835394 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-49p6k" event={"ID":"d6bce0ec-3045-405e-914b-f466321dc7ea","Type":"ContainerStarted","Data":"df274408c0896cdee3ef53f3fc979b7bed7abd7fb861ff09021dc1022db21cd4"} Nov 21 19:17:50 crc kubenswrapper[4701]: E1121 19:17:50.843345 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovn-controller\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.164:5001/podified-master-centos10/openstack-ovn-controller:watcher_latest\\\"\"" pod="openstack/ovn-controller-49p6k" podUID="d6bce0ec-3045-405e-914b-f466321dc7ea" Nov 21 19:17:50 crc kubenswrapper[4701]: I1121 19:17:50.848257 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e387a880-c597-4dfc-9c86-843b719a31b4-dns-svc\") pod \"e387a880-c597-4dfc-9c86-843b719a31b4\" (UID: \"e387a880-c597-4dfc-9c86-843b719a31b4\") " Nov 21 19:17:50 crc kubenswrapper[4701]: I1121 19:17:50.848317 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e387a880-c597-4dfc-9c86-843b719a31b4-config\") pod \"e387a880-c597-4dfc-9c86-843b719a31b4\" (UID: \"e387a880-c597-4dfc-9c86-843b719a31b4\") " Nov 21 19:17:50 crc kubenswrapper[4701]: I1121 19:17:50.848391 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/272c7d8c-0292-4222-882d-c0c674c604b8-config\") pod \"272c7d8c-0292-4222-882d-c0c674c604b8\" (UID: \"272c7d8c-0292-4222-882d-c0c674c604b8\") " Nov 21 19:17:50 crc kubenswrapper[4701]: I1121 19:17:50.848470 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bhvr9\" (UniqueName: \"kubernetes.io/projected/e387a880-c597-4dfc-9c86-843b719a31b4-kube-api-access-bhvr9\") pod \"e387a880-c597-4dfc-9c86-843b719a31b4\" (UID: \"e387a880-c597-4dfc-9c86-843b719a31b4\") " Nov 21 19:17:50 crc kubenswrapper[4701]: I1121 19:17:50.848567 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-527w4\" (UniqueName: \"kubernetes.io/projected/272c7d8c-0292-4222-882d-c0c674c604b8-kube-api-access-527w4\") pod \"272c7d8c-0292-4222-882d-c0c674c604b8\" (UID: \"272c7d8c-0292-4222-882d-c0c674c604b8\") " Nov 21 19:17:50 crc kubenswrapper[4701]: I1121 19:17:50.848936 4701 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e387a880-c597-4dfc-9c86-843b719a31b4-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e387a880-c597-4dfc-9c86-843b719a31b4" (UID: "e387a880-c597-4dfc-9c86-843b719a31b4"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:17:50 crc kubenswrapper[4701]: I1121 19:17:50.849236 4701 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e387a880-c597-4dfc-9c86-843b719a31b4-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 19:17:50 crc kubenswrapper[4701]: I1121 19:17:50.849400 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/272c7d8c-0292-4222-882d-c0c674c604b8-config" (OuterVolumeSpecName: "config") pod "272c7d8c-0292-4222-882d-c0c674c604b8" (UID: "272c7d8c-0292-4222-882d-c0c674c604b8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:17:50 crc kubenswrapper[4701]: I1121 19:17:50.849543 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e387a880-c597-4dfc-9c86-843b719a31b4-config" (OuterVolumeSpecName: "config") pod "e387a880-c597-4dfc-9c86-843b719a31b4" (UID: "e387a880-c597-4dfc-9c86-843b719a31b4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:17:50 crc kubenswrapper[4701]: I1121 19:17:50.853254 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e387a880-c597-4dfc-9c86-843b719a31b4-kube-api-access-bhvr9" (OuterVolumeSpecName: "kube-api-access-bhvr9") pod "e387a880-c597-4dfc-9c86-843b719a31b4" (UID: "e387a880-c597-4dfc-9c86-843b719a31b4"). InnerVolumeSpecName "kube-api-access-bhvr9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:17:50 crc kubenswrapper[4701]: I1121 19:17:50.854649 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"117bcee4-5190-4738-8e03-19f77f4fb428","Type":"ContainerStarted","Data":"a30beea3d4294738a4a155ecfa6535837497b52cd8fdf63dfaa68edfd1abf5f8"} Nov 21 19:17:50 crc kubenswrapper[4701]: I1121 19:17:50.855356 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/272c7d8c-0292-4222-882d-c0c674c604b8-kube-api-access-527w4" (OuterVolumeSpecName: "kube-api-access-527w4") pod "272c7d8c-0292-4222-882d-c0c674c604b8" (UID: "272c7d8c-0292-4222-882d-c0c674c604b8"). InnerVolumeSpecName "kube-api-access-527w4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:17:50 crc kubenswrapper[4701]: I1121 19:17:50.861576 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"fae8c560-c6a6-453e-8c64-9dca8183e5c0","Type":"ContainerStarted","Data":"e4718f9e55335df58ac34a2713bf3e67a2ab122adbd010dfee627c72f9396ea6"} Nov 21 19:17:50 crc kubenswrapper[4701]: I1121 19:17:50.865587 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-866784dbf-w5cpz" event={"ID":"e387a880-c597-4dfc-9c86-843b719a31b4","Type":"ContainerDied","Data":"adfa7f76e1c151dcab7bca305e2efe928969ffbce722921751fc65a52bdbb029"} Nov 21 19:17:50 crc kubenswrapper[4701]: I1121 19:17:50.865690 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-866784dbf-w5cpz" Nov 21 19:17:50 crc kubenswrapper[4701]: I1121 19:17:50.872198 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56d4f587b9-g9fx9" event={"ID":"430bf048-144e-44fa-970e-10b09547c277","Type":"ContainerStarted","Data":"7d97f43f2c36442b6ff4718dd1ae513053930f4cea0e478d4e04face3af71d83"} Nov 21 19:17:50 crc kubenswrapper[4701]: I1121 19:17:50.874473 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-vqwr8" event={"ID":"c1552bca-042c-4d9e-ac6f-8c8f762ac494","Type":"ContainerStarted","Data":"fbd86ac9936d3352030e6e5ba4b17702b6d72a6ae277fc5625251260455448e1"} Nov 21 19:17:50 crc kubenswrapper[4701]: I1121 19:17:50.881484 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d7d677589-c9bgz" event={"ID":"cc52531a-0d4e-44e4-8f16-2aca77ecaa02","Type":"ContainerStarted","Data":"79d335e275676c894394dd186a208408986bdb0608ece428d53830e02f3fd134"} Nov 21 19:17:50 crc kubenswrapper[4701]: I1121 19:17:50.886122 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3","Type":"ContainerStarted","Data":"2262d54691fb30547034c388144ba5bd93002a261e43134133f5c3f14bb77fe4"} Nov 21 19:17:50 crc kubenswrapper[4701]: I1121 19:17:50.901395 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-notifications-server-0" event={"ID":"fa567817-ce17-4cb3-9e55-e14902a96420","Type":"ContainerStarted","Data":"457e23c2263d99c55c1c077673142f2d6baa049ff3ca247d5e3e464614317397"} Nov 21 19:17:50 crc kubenswrapper[4701]: I1121 19:17:50.947887 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-866784dbf-w5cpz"] Nov 21 19:17:50 crc kubenswrapper[4701]: I1121 19:17:50.952890 4701 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/272c7d8c-0292-4222-882d-c0c674c604b8-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:17:50 crc kubenswrapper[4701]: I1121 19:17:50.954306 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bhvr9\" (UniqueName: \"kubernetes.io/projected/e387a880-c597-4dfc-9c86-843b719a31b4-kube-api-access-bhvr9\") on node \"crc\" DevicePath \"\"" Nov 21 19:17:50 crc kubenswrapper[4701]: I1121 19:17:50.954320 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-527w4\" (UniqueName: \"kubernetes.io/projected/272c7d8c-0292-4222-882d-c0c674c604b8-kube-api-access-527w4\") on node \"crc\" DevicePath \"\"" Nov 21 19:17:50 crc kubenswrapper[4701]: I1121 19:17:50.954332 4701 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e387a880-c597-4dfc-9c86-843b719a31b4-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:17:50 crc kubenswrapper[4701]: I1121 19:17:50.961364 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-866784dbf-w5cpz"] Nov 21 19:17:51 crc kubenswrapper[4701]: I1121 19:17:51.172289 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bd759bbbf-h4thw"] Nov 21 19:17:51 crc kubenswrapper[4701]: I1121 19:17:51.177068 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5bd759bbbf-h4thw"] Nov 21 19:17:51 crc kubenswrapper[4701]: I1121 19:17:51.279300 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 21 19:17:51 crc kubenswrapper[4701]: W1121 
19:17:51.286050 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod76796a80_e8f7_43ed_862b_011b964a31f9.slice/crio-0049c905f4c3f9dc8d59e49dbc66b55bbfb6e3a91c15fa075d5af2d43124fa35 WatchSource:0}: Error finding container 0049c905f4c3f9dc8d59e49dbc66b55bbfb6e3a91c15fa075d5af2d43124fa35: Status 404 returned error can't find the container with id 0049c905f4c3f9dc8d59e49dbc66b55bbfb6e3a91c15fa075d5af2d43124fa35 Nov 21 19:17:51 crc kubenswrapper[4701]: I1121 19:17:51.915432 4701 generic.go:334] "Generic (PLEG): container finished" podID="430bf048-144e-44fa-970e-10b09547c277" containerID="4cd5985a9dd7c2c0754440a33bb31083d51b77fae4f5ae14d9c23534a3968bda" exitCode=0 Nov 21 19:17:51 crc kubenswrapper[4701]: I1121 19:17:51.915881 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56d4f587b9-g9fx9" event={"ID":"430bf048-144e-44fa-970e-10b09547c277","Type":"ContainerDied","Data":"4cd5985a9dd7c2c0754440a33bb31083d51b77fae4f5ae14d9c23534a3968bda"} Nov 21 19:17:51 crc kubenswrapper[4701]: I1121 19:17:51.920950 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"76796a80-e8f7-43ed-862b-011b964a31f9","Type":"ContainerStarted","Data":"0049c905f4c3f9dc8d59e49dbc66b55bbfb6e3a91c15fa075d5af2d43124fa35"} Nov 21 19:17:51 crc kubenswrapper[4701]: I1121 19:17:51.925996 4701 generic.go:334] "Generic (PLEG): container finished" podID="cc52531a-0d4e-44e4-8f16-2aca77ecaa02" containerID="23e8c445997290df8ede75813727bb744fb7ba2bcfc095ffec588084793fd21f" exitCode=0 Nov 21 19:17:51 crc kubenswrapper[4701]: I1121 19:17:51.926073 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d7d677589-c9bgz" event={"ID":"cc52531a-0d4e-44e4-8f16-2aca77ecaa02","Type":"ContainerDied","Data":"23e8c445997290df8ede75813727bb744fb7ba2bcfc095ffec588084793fd21f"} Nov 21 19:17:51 crc kubenswrapper[4701]: I1121 19:17:51.934616 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"2d891f55-8791-487d-b8f9-b4183da3e720","Type":"ContainerStarted","Data":"7eb31a7ea7e94323a175e68811be649e8415bab922b492ffe586b2c389e0d547"} Nov 21 19:17:51 crc kubenswrapper[4701]: I1121 19:17:51.934790 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 21 19:17:51 crc kubenswrapper[4701]: I1121 19:17:51.946667 4701 generic.go:334] "Generic (PLEG): container finished" podID="a6170b01-4aa3-4d90-a317-764721c7e08c" containerID="79583820bc096aedf9c47c940c25ea0b48d1d7ee183082c7bef7d509305ff99e" exitCode=0 Nov 21 19:17:51 crc kubenswrapper[4701]: I1121 19:17:51.946764 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6684cc9dc7-878nj" event={"ID":"a6170b01-4aa3-4d90-a317-764721c7e08c","Type":"ContainerDied","Data":"79583820bc096aedf9c47c940c25ea0b48d1d7ee183082c7bef7d509305ff99e"} Nov 21 19:17:51 crc kubenswrapper[4701]: E1121 19:17:51.948648 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovn-controller\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.164:5001/podified-master-centos10/openstack-ovn-controller:watcher_latest\\\"\"" pod="openstack/ovn-controller-49p6k" podUID="d6bce0ec-3045-405e-914b-f466321dc7ea" Nov 21 19:17:51 crc kubenswrapper[4701]: E1121 19:17:51.956305 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with 
ImagePullBackOff: \"Back-off pulling image \\\"registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0\\\"\"" pod="openstack/kube-state-metrics-0" podUID="600d7142-cf1a-4e30-968d-5b75a572085d" Nov 21 19:17:51 crc kubenswrapper[4701]: I1121 19:17:51.969925 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="272c7d8c-0292-4222-882d-c0c674c604b8" path="/var/lib/kubelet/pods/272c7d8c-0292-4222-882d-c0c674c604b8/volumes" Nov 21 19:17:51 crc kubenswrapper[4701]: I1121 19:17:51.970371 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e387a880-c597-4dfc-9c86-843b719a31b4" path="/var/lib/kubelet/pods/e387a880-c597-4dfc-9c86-843b719a31b4/volumes" Nov 21 19:17:52 crc kubenswrapper[4701]: I1121 19:17:52.034898 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=15.820659866 podStartE2EDuration="18.034814746s" podCreationTimestamp="2025-11-21 19:17:34 +0000 UTC" firstStartedPulling="2025-11-21 19:17:48.366241187 +0000 UTC m=+959.151381224" lastFinishedPulling="2025-11-21 19:17:50.580396077 +0000 UTC m=+961.365536104" observedRunningTime="2025-11-21 19:17:52.02372481 +0000 UTC m=+962.808864837" watchObservedRunningTime="2025-11-21 19:17:52.034814746 +0000 UTC m=+962.819954783" Nov 21 19:17:53 crc kubenswrapper[4701]: I1121 19:17:53.606203 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56d4f587b9-g9fx9" Nov 21 19:17:53 crc kubenswrapper[4701]: I1121 19:17:53.760785 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/430bf048-144e-44fa-970e-10b09547c277-dns-svc\") pod \"430bf048-144e-44fa-970e-10b09547c277\" (UID: \"430bf048-144e-44fa-970e-10b09547c277\") " Nov 21 19:17:53 crc kubenswrapper[4701]: I1121 19:17:53.760954 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zddxr\" (UniqueName: \"kubernetes.io/projected/430bf048-144e-44fa-970e-10b09547c277-kube-api-access-zddxr\") pod \"430bf048-144e-44fa-970e-10b09547c277\" (UID: \"430bf048-144e-44fa-970e-10b09547c277\") " Nov 21 19:17:53 crc kubenswrapper[4701]: I1121 19:17:53.761105 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/430bf048-144e-44fa-970e-10b09547c277-config\") pod \"430bf048-144e-44fa-970e-10b09547c277\" (UID: \"430bf048-144e-44fa-970e-10b09547c277\") " Nov 21 19:17:53 crc kubenswrapper[4701]: I1121 19:17:53.768973 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/430bf048-144e-44fa-970e-10b09547c277-kube-api-access-zddxr" (OuterVolumeSpecName: "kube-api-access-zddxr") pod "430bf048-144e-44fa-970e-10b09547c277" (UID: "430bf048-144e-44fa-970e-10b09547c277"). InnerVolumeSpecName "kube-api-access-zddxr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:17:53 crc kubenswrapper[4701]: I1121 19:17:53.789369 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/430bf048-144e-44fa-970e-10b09547c277-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "430bf048-144e-44fa-970e-10b09547c277" (UID: "430bf048-144e-44fa-970e-10b09547c277"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:17:53 crc kubenswrapper[4701]: I1121 19:17:53.789414 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/430bf048-144e-44fa-970e-10b09547c277-config" (OuterVolumeSpecName: "config") pod "430bf048-144e-44fa-970e-10b09547c277" (UID: "430bf048-144e-44fa-970e-10b09547c277"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:17:53 crc kubenswrapper[4701]: I1121 19:17:53.864394 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zddxr\" (UniqueName: \"kubernetes.io/projected/430bf048-144e-44fa-970e-10b09547c277-kube-api-access-zddxr\") on node \"crc\" DevicePath \"\"" Nov 21 19:17:53 crc kubenswrapper[4701]: I1121 19:17:53.864459 4701 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/430bf048-144e-44fa-970e-10b09547c277-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:17:53 crc kubenswrapper[4701]: I1121 19:17:53.864480 4701 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/430bf048-144e-44fa-970e-10b09547c277-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 19:17:53 crc kubenswrapper[4701]: I1121 19:17:53.973538 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56d4f587b9-g9fx9" Nov 21 19:17:53 crc kubenswrapper[4701]: I1121 19:17:53.973592 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56d4f587b9-g9fx9" event={"ID":"430bf048-144e-44fa-970e-10b09547c277","Type":"ContainerDied","Data":"7d97f43f2c36442b6ff4718dd1ae513053930f4cea0e478d4e04face3af71d83"} Nov 21 19:17:53 crc kubenswrapper[4701]: I1121 19:17:53.973677 4701 scope.go:117] "RemoveContainer" containerID="4cd5985a9dd7c2c0754440a33bb31083d51b77fae4f5ae14d9c23534a3968bda" Nov 21 19:17:54 crc kubenswrapper[4701]: I1121 19:17:54.037258 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56d4f587b9-g9fx9"] Nov 21 19:17:54 crc kubenswrapper[4701]: I1121 19:17:54.042747 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-56d4f587b9-g9fx9"] Nov 21 19:17:55 crc kubenswrapper[4701]: I1121 19:17:55.203788 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Nov 21 19:17:55 crc kubenswrapper[4701]: I1121 19:17:55.981862 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="430bf048-144e-44fa-970e-10b09547c277" path="/var/lib/kubelet/pods/430bf048-144e-44fa-970e-10b09547c277/volumes" Nov 21 19:17:57 crc kubenswrapper[4701]: I1121 19:17:57.456345 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d7d677589-c9bgz"] Nov 21 19:17:57 crc kubenswrapper[4701]: I1121 19:17:57.542601 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-76446f9f99-vgs97"] Nov 21 19:17:57 crc kubenswrapper[4701]: E1121 19:17:57.543082 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="430bf048-144e-44fa-970e-10b09547c277" containerName="init" Nov 21 19:17:57 crc kubenswrapper[4701]: I1121 19:17:57.543096 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="430bf048-144e-44fa-970e-10b09547c277" containerName="init" Nov 21 19:17:57 crc kubenswrapper[4701]: I1121 19:17:57.543315 4701 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="430bf048-144e-44fa-970e-10b09547c277" containerName="init" Nov 21 19:17:57 crc kubenswrapper[4701]: I1121 19:17:57.544306 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76446f9f99-vgs97" Nov 21 19:17:57 crc kubenswrapper[4701]: I1121 19:17:57.554836 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-76446f9f99-vgs97"] Nov 21 19:17:57 crc kubenswrapper[4701]: I1121 19:17:57.651150 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5a12e1a0-73d2-4cfe-96d2-d311378a6e3a-dns-svc\") pod \"dnsmasq-dns-76446f9f99-vgs97\" (UID: \"5a12e1a0-73d2-4cfe-96d2-d311378a6e3a\") " pod="openstack/dnsmasq-dns-76446f9f99-vgs97" Nov 21 19:17:57 crc kubenswrapper[4701]: I1121 19:17:57.651705 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5a12e1a0-73d2-4cfe-96d2-d311378a6e3a-config\") pod \"dnsmasq-dns-76446f9f99-vgs97\" (UID: \"5a12e1a0-73d2-4cfe-96d2-d311378a6e3a\") " pod="openstack/dnsmasq-dns-76446f9f99-vgs97" Nov 21 19:17:57 crc kubenswrapper[4701]: I1121 19:17:57.652166 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hghxt\" (UniqueName: \"kubernetes.io/projected/5a12e1a0-73d2-4cfe-96d2-d311378a6e3a-kube-api-access-hghxt\") pod \"dnsmasq-dns-76446f9f99-vgs97\" (UID: \"5a12e1a0-73d2-4cfe-96d2-d311378a6e3a\") " pod="openstack/dnsmasq-dns-76446f9f99-vgs97" Nov 21 19:17:57 crc kubenswrapper[4701]: I1121 19:17:57.754045 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5a12e1a0-73d2-4cfe-96d2-d311378a6e3a-dns-svc\") pod \"dnsmasq-dns-76446f9f99-vgs97\" (UID: \"5a12e1a0-73d2-4cfe-96d2-d311378a6e3a\") " pod="openstack/dnsmasq-dns-76446f9f99-vgs97" Nov 21 19:17:57 crc kubenswrapper[4701]: I1121 19:17:57.754489 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5a12e1a0-73d2-4cfe-96d2-d311378a6e3a-config\") pod \"dnsmasq-dns-76446f9f99-vgs97\" (UID: \"5a12e1a0-73d2-4cfe-96d2-d311378a6e3a\") " pod="openstack/dnsmasq-dns-76446f9f99-vgs97" Nov 21 19:17:57 crc kubenswrapper[4701]: I1121 19:17:57.754521 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hghxt\" (UniqueName: \"kubernetes.io/projected/5a12e1a0-73d2-4cfe-96d2-d311378a6e3a-kube-api-access-hghxt\") pod \"dnsmasq-dns-76446f9f99-vgs97\" (UID: \"5a12e1a0-73d2-4cfe-96d2-d311378a6e3a\") " pod="openstack/dnsmasq-dns-76446f9f99-vgs97" Nov 21 19:17:57 crc kubenswrapper[4701]: I1121 19:17:57.755752 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5a12e1a0-73d2-4cfe-96d2-d311378a6e3a-dns-svc\") pod \"dnsmasq-dns-76446f9f99-vgs97\" (UID: \"5a12e1a0-73d2-4cfe-96d2-d311378a6e3a\") " pod="openstack/dnsmasq-dns-76446f9f99-vgs97" Nov 21 19:17:57 crc kubenswrapper[4701]: I1121 19:17:57.755809 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5a12e1a0-73d2-4cfe-96d2-d311378a6e3a-config\") pod \"dnsmasq-dns-76446f9f99-vgs97\" (UID: \"5a12e1a0-73d2-4cfe-96d2-d311378a6e3a\") " pod="openstack/dnsmasq-dns-76446f9f99-vgs97" Nov 21 19:17:57 crc kubenswrapper[4701]: I1121 
19:17:57.786180 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hghxt\" (UniqueName: \"kubernetes.io/projected/5a12e1a0-73d2-4cfe-96d2-d311378a6e3a-kube-api-access-hghxt\") pod \"dnsmasq-dns-76446f9f99-vgs97\" (UID: \"5a12e1a0-73d2-4cfe-96d2-d311378a6e3a\") " pod="openstack/dnsmasq-dns-76446f9f99-vgs97" Nov 21 19:17:57 crc kubenswrapper[4701]: I1121 19:17:57.918220 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76446f9f99-vgs97" Nov 21 19:17:58 crc kubenswrapper[4701]: I1121 19:17:58.046583 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d7d677589-c9bgz" event={"ID":"cc52531a-0d4e-44e4-8f16-2aca77ecaa02","Type":"ContainerStarted","Data":"c716d8fd3a3a958d6a50f56edb0496829536007c47813bf18af12a842b48ba89"} Nov 21 19:17:58 crc kubenswrapper[4701]: I1121 19:17:58.046719 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6d7d677589-c9bgz" Nov 21 19:17:58 crc kubenswrapper[4701]: I1121 19:17:58.046716 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6d7d677589-c9bgz" podUID="cc52531a-0d4e-44e4-8f16-2aca77ecaa02" containerName="dnsmasq-dns" containerID="cri-o://c716d8fd3a3a958d6a50f56edb0496829536007c47813bf18af12a842b48ba89" gracePeriod=10 Nov 21 19:17:58 crc kubenswrapper[4701]: I1121 19:17:58.097735 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6684cc9dc7-878nj" event={"ID":"a6170b01-4aa3-4d90-a317-764721c7e08c","Type":"ContainerStarted","Data":"67110877bcc06f9e505a88b5866543854f2e2588b4bfe0441ac53f13771a8142"} Nov 21 19:17:58 crc kubenswrapper[4701]: I1121 19:17:58.098139 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6684cc9dc7-878nj" Nov 21 19:17:58 crc kubenswrapper[4701]: I1121 19:17:58.127509 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6d7d677589-c9bgz" podStartSLOduration=29.006998459 podStartE2EDuration="29.127484683s" podCreationTimestamp="2025-11-21 19:17:29 +0000 UTC" firstStartedPulling="2025-11-21 19:17:50.462689048 +0000 UTC m=+961.247829075" lastFinishedPulling="2025-11-21 19:17:50.583175272 +0000 UTC m=+961.368315299" observedRunningTime="2025-11-21 19:17:58.117347982 +0000 UTC m=+968.902488009" watchObservedRunningTime="2025-11-21 19:17:58.127484683 +0000 UTC m=+968.912624710" Nov 21 19:17:58 crc kubenswrapper[4701]: I1121 19:17:58.141073 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6684cc9dc7-878nj" podStartSLOduration=27.828963563 podStartE2EDuration="28.141051835s" podCreationTimestamp="2025-11-21 19:17:30 +0000 UTC" firstStartedPulling="2025-11-21 19:17:50.464954937 +0000 UTC m=+961.250094964" lastFinishedPulling="2025-11-21 19:17:50.777043199 +0000 UTC m=+961.562183236" observedRunningTime="2025-11-21 19:17:58.140586733 +0000 UTC m=+968.925726760" watchObservedRunningTime="2025-11-21 19:17:58.141051835 +0000 UTC m=+968.926191862" Nov 21 19:17:58 crc kubenswrapper[4701]: I1121 19:17:58.608408 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-76446f9f99-vgs97"] Nov 21 19:17:58 crc kubenswrapper[4701]: I1121 19:17:58.617426 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Nov 21 19:17:58 crc kubenswrapper[4701]: I1121 19:17:58.647048 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/swift-storage-0"] Nov 21 19:17:58 crc kubenswrapper[4701]: I1121 19:17:58.647283 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 21 19:17:58 crc kubenswrapper[4701]: I1121 19:17:58.650913 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Nov 21 19:17:58 crc kubenswrapper[4701]: I1121 19:17:58.651287 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Nov 21 19:17:58 crc kubenswrapper[4701]: I1121 19:17:58.651301 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-k2fwd" Nov 21 19:17:58 crc kubenswrapper[4701]: I1121 19:17:58.651593 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Nov 21 19:17:58 crc kubenswrapper[4701]: I1121 19:17:58.752254 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5qfnw\" (UniqueName: \"kubernetes.io/projected/bf8d5d78-fa29-41ff-94e0-6249f7e02e1b-kube-api-access-5qfnw\") pod \"swift-storage-0\" (UID: \"bf8d5d78-fa29-41ff-94e0-6249f7e02e1b\") " pod="openstack/swift-storage-0" Nov 21 19:17:58 crc kubenswrapper[4701]: I1121 19:17:58.752477 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/bf8d5d78-fa29-41ff-94e0-6249f7e02e1b-lock\") pod \"swift-storage-0\" (UID: \"bf8d5d78-fa29-41ff-94e0-6249f7e02e1b\") " pod="openstack/swift-storage-0" Nov 21 19:17:58 crc kubenswrapper[4701]: I1121 19:17:58.752545 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/bf8d5d78-fa29-41ff-94e0-6249f7e02e1b-etc-swift\") pod \"swift-storage-0\" (UID: \"bf8d5d78-fa29-41ff-94e0-6249f7e02e1b\") " pod="openstack/swift-storage-0" Nov 21 19:17:58 crc kubenswrapper[4701]: I1121 19:17:58.752572 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"swift-storage-0\" (UID: \"bf8d5d78-fa29-41ff-94e0-6249f7e02e1b\") " pod="openstack/swift-storage-0" Nov 21 19:17:58 crc kubenswrapper[4701]: I1121 19:17:58.752651 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/bf8d5d78-fa29-41ff-94e0-6249f7e02e1b-cache\") pod \"swift-storage-0\" (UID: \"bf8d5d78-fa29-41ff-94e0-6249f7e02e1b\") " pod="openstack/swift-storage-0" Nov 21 19:17:58 crc kubenswrapper[4701]: W1121 19:17:58.804349 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5a12e1a0_73d2_4cfe_96d2_d311378a6e3a.slice/crio-e9faca7d795029e0eb6f3006962bf2100844d101676b1386ec3c3eb9c57eedc4 WatchSource:0}: Error finding container e9faca7d795029e0eb6f3006962bf2100844d101676b1386ec3c3eb9c57eedc4: Status 404 returned error can't find the container with id e9faca7d795029e0eb6f3006962bf2100844d101676b1386ec3c3eb9c57eedc4 Nov 21 19:17:58 crc kubenswrapper[4701]: I1121 19:17:58.854486 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5qfnw\" (UniqueName: \"kubernetes.io/projected/bf8d5d78-fa29-41ff-94e0-6249f7e02e1b-kube-api-access-5qfnw\") pod 
\"swift-storage-0\" (UID: \"bf8d5d78-fa29-41ff-94e0-6249f7e02e1b\") " pod="openstack/swift-storage-0" Nov 21 19:17:58 crc kubenswrapper[4701]: I1121 19:17:58.854544 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/bf8d5d78-fa29-41ff-94e0-6249f7e02e1b-lock\") pod \"swift-storage-0\" (UID: \"bf8d5d78-fa29-41ff-94e0-6249f7e02e1b\") " pod="openstack/swift-storage-0" Nov 21 19:17:58 crc kubenswrapper[4701]: I1121 19:17:58.854579 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/bf8d5d78-fa29-41ff-94e0-6249f7e02e1b-etc-swift\") pod \"swift-storage-0\" (UID: \"bf8d5d78-fa29-41ff-94e0-6249f7e02e1b\") " pod="openstack/swift-storage-0" Nov 21 19:17:58 crc kubenswrapper[4701]: I1121 19:17:58.854600 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"swift-storage-0\" (UID: \"bf8d5d78-fa29-41ff-94e0-6249f7e02e1b\") " pod="openstack/swift-storage-0" Nov 21 19:17:58 crc kubenswrapper[4701]: I1121 19:17:58.854663 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/bf8d5d78-fa29-41ff-94e0-6249f7e02e1b-cache\") pod \"swift-storage-0\" (UID: \"bf8d5d78-fa29-41ff-94e0-6249f7e02e1b\") " pod="openstack/swift-storage-0" Nov 21 19:17:58 crc kubenswrapper[4701]: I1121 19:17:58.855251 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/bf8d5d78-fa29-41ff-94e0-6249f7e02e1b-cache\") pod \"swift-storage-0\" (UID: \"bf8d5d78-fa29-41ff-94e0-6249f7e02e1b\") " pod="openstack/swift-storage-0" Nov 21 19:17:58 crc kubenswrapper[4701]: I1121 19:17:58.855804 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/bf8d5d78-fa29-41ff-94e0-6249f7e02e1b-lock\") pod \"swift-storage-0\" (UID: \"bf8d5d78-fa29-41ff-94e0-6249f7e02e1b\") " pod="openstack/swift-storage-0" Nov 21 19:17:58 crc kubenswrapper[4701]: E1121 19:17:58.855920 4701 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 21 19:17:58 crc kubenswrapper[4701]: E1121 19:17:58.855935 4701 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 21 19:17:58 crc kubenswrapper[4701]: E1121 19:17:58.855978 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/bf8d5d78-fa29-41ff-94e0-6249f7e02e1b-etc-swift podName:bf8d5d78-fa29-41ff-94e0-6249f7e02e1b nodeName:}" failed. No retries permitted until 2025-11-21 19:17:59.355959326 +0000 UTC m=+970.141099353 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/bf8d5d78-fa29-41ff-94e0-6249f7e02e1b-etc-swift") pod "swift-storage-0" (UID: "bf8d5d78-fa29-41ff-94e0-6249f7e02e1b") : configmap "swift-ring-files" not found Nov 21 19:17:58 crc kubenswrapper[4701]: I1121 19:17:58.856409 4701 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"swift-storage-0\" (UID: \"bf8d5d78-fa29-41ff-94e0-6249f7e02e1b\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/swift-storage-0" Nov 21 19:17:58 crc kubenswrapper[4701]: I1121 19:17:58.879534 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5qfnw\" (UniqueName: \"kubernetes.io/projected/bf8d5d78-fa29-41ff-94e0-6249f7e02e1b-kube-api-access-5qfnw\") pod \"swift-storage-0\" (UID: \"bf8d5d78-fa29-41ff-94e0-6249f7e02e1b\") " pod="openstack/swift-storage-0" Nov 21 19:17:59 crc kubenswrapper[4701]: I1121 19:17:59.121690 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-vqwr8" event={"ID":"c1552bca-042c-4d9e-ac6f-8c8f762ac494","Type":"ContainerStarted","Data":"87a89f691789d523c36ccebfdabc9fb8fd775243f06b279527bfd3af11e95710"} Nov 21 19:17:59 crc kubenswrapper[4701]: I1121 19:17:59.129167 4701 generic.go:334] "Generic (PLEG): container finished" podID="cc52531a-0d4e-44e4-8f16-2aca77ecaa02" containerID="c716d8fd3a3a958d6a50f56edb0496829536007c47813bf18af12a842b48ba89" exitCode=0 Nov 21 19:17:59 crc kubenswrapper[4701]: I1121 19:17:59.129342 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d7d677589-c9bgz" event={"ID":"cc52531a-0d4e-44e4-8f16-2aca77ecaa02","Type":"ContainerDied","Data":"c716d8fd3a3a958d6a50f56edb0496829536007c47813bf18af12a842b48ba89"} Nov 21 19:17:59 crc kubenswrapper[4701]: I1121 19:17:59.129381 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d7d677589-c9bgz" event={"ID":"cc52531a-0d4e-44e4-8f16-2aca77ecaa02","Type":"ContainerDied","Data":"79d335e275676c894394dd186a208408986bdb0608ece428d53830e02f3fd134"} Nov 21 19:17:59 crc kubenswrapper[4701]: I1121 19:17:59.129398 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="79d335e275676c894394dd186a208408986bdb0608ece428d53830e02f3fd134" Nov 21 19:17:59 crc kubenswrapper[4701]: I1121 19:17:59.134739 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"fae8c560-c6a6-453e-8c64-9dca8183e5c0","Type":"ContainerStarted","Data":"f2b78e520a30787d034afb7190c53f5d7e0b8b9d94d73a1462404ec8b1ead2d5"} Nov 21 19:17:59 crc kubenswrapper[4701]: I1121 19:17:59.138300 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"swift-storage-0\" (UID: \"bf8d5d78-fa29-41ff-94e0-6249f7e02e1b\") " pod="openstack/swift-storage-0" Nov 21 19:17:59 crc kubenswrapper[4701]: I1121 19:17:59.140574 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"b6432247-ed58-4dce-98d4-4267d0122151","Type":"ContainerStarted","Data":"559d30179c51ba58890b6b7d53d5ea2d15e4450b68224206b02193a8506cd254"} Nov 21 19:17:59 crc kubenswrapper[4701]: I1121 19:17:59.168560 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" 
event={"ID":"76796a80-e8f7-43ed-862b-011b964a31f9","Type":"ContainerStarted","Data":"ddf26ade75c29dc5f936ea565b7a1c2dade1eb561c1c2a1eabef5082c5f095fe"} Nov 21 19:17:59 crc kubenswrapper[4701]: I1121 19:17:59.178946 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"117bcee4-5190-4738-8e03-19f77f4fb428","Type":"ContainerStarted","Data":"72bb6c0b638d467678343a87f1a83e5a266f193fa8c192110931c51d258e5550"} Nov 21 19:17:59 crc kubenswrapper[4701]: I1121 19:17:59.184523 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76446f9f99-vgs97" event={"ID":"5a12e1a0-73d2-4cfe-96d2-d311378a6e3a","Type":"ContainerStarted","Data":"e9faca7d795029e0eb6f3006962bf2100844d101676b1386ec3c3eb9c57eedc4"} Nov 21 19:17:59 crc kubenswrapper[4701]: I1121 19:17:59.373924 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/bf8d5d78-fa29-41ff-94e0-6249f7e02e1b-etc-swift\") pod \"swift-storage-0\" (UID: \"bf8d5d78-fa29-41ff-94e0-6249f7e02e1b\") " pod="openstack/swift-storage-0" Nov 21 19:17:59 crc kubenswrapper[4701]: E1121 19:17:59.374291 4701 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 21 19:17:59 crc kubenswrapper[4701]: E1121 19:17:59.374326 4701 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 21 19:17:59 crc kubenswrapper[4701]: E1121 19:17:59.374432 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/bf8d5d78-fa29-41ff-94e0-6249f7e02e1b-etc-swift podName:bf8d5d78-fa29-41ff-94e0-6249f7e02e1b nodeName:}" failed. No retries permitted until 2025-11-21 19:18:00.374374888 +0000 UTC m=+971.159514915 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/bf8d5d78-fa29-41ff-94e0-6249f7e02e1b-etc-swift") pod "swift-storage-0" (UID: "bf8d5d78-fa29-41ff-94e0-6249f7e02e1b") : configmap "swift-ring-files" not found Nov 21 19:17:59 crc kubenswrapper[4701]: I1121 19:17:59.615146 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6d7d677589-c9bgz" Nov 21 19:17:59 crc kubenswrapper[4701]: I1121 19:17:59.680426 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc52531a-0d4e-44e4-8f16-2aca77ecaa02-config\") pod \"cc52531a-0d4e-44e4-8f16-2aca77ecaa02\" (UID: \"cc52531a-0d4e-44e4-8f16-2aca77ecaa02\") " Nov 21 19:17:59 crc kubenswrapper[4701]: I1121 19:17:59.680475 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cc52531a-0d4e-44e4-8f16-2aca77ecaa02-dns-svc\") pod \"cc52531a-0d4e-44e4-8f16-2aca77ecaa02\" (UID: \"cc52531a-0d4e-44e4-8f16-2aca77ecaa02\") " Nov 21 19:17:59 crc kubenswrapper[4701]: I1121 19:17:59.680549 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wrdmv\" (UniqueName: \"kubernetes.io/projected/cc52531a-0d4e-44e4-8f16-2aca77ecaa02-kube-api-access-wrdmv\") pod \"cc52531a-0d4e-44e4-8f16-2aca77ecaa02\" (UID: \"cc52531a-0d4e-44e4-8f16-2aca77ecaa02\") " Nov 21 19:17:59 crc kubenswrapper[4701]: I1121 19:17:59.685559 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc52531a-0d4e-44e4-8f16-2aca77ecaa02-kube-api-access-wrdmv" (OuterVolumeSpecName: "kube-api-access-wrdmv") pod "cc52531a-0d4e-44e4-8f16-2aca77ecaa02" (UID: "cc52531a-0d4e-44e4-8f16-2aca77ecaa02"). InnerVolumeSpecName "kube-api-access-wrdmv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:17:59 crc kubenswrapper[4701]: I1121 19:17:59.784123 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wrdmv\" (UniqueName: \"kubernetes.io/projected/cc52531a-0d4e-44e4-8f16-2aca77ecaa02-kube-api-access-wrdmv\") on node \"crc\" DevicePath \"\"" Nov 21 19:17:59 crc kubenswrapper[4701]: I1121 19:17:59.828289 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc52531a-0d4e-44e4-8f16-2aca77ecaa02-config" (OuterVolumeSpecName: "config") pod "cc52531a-0d4e-44e4-8f16-2aca77ecaa02" (UID: "cc52531a-0d4e-44e4-8f16-2aca77ecaa02"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:17:59 crc kubenswrapper[4701]: I1121 19:17:59.885907 4701 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc52531a-0d4e-44e4-8f16-2aca77ecaa02-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:17:59 crc kubenswrapper[4701]: I1121 19:17:59.913370 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc52531a-0d4e-44e4-8f16-2aca77ecaa02-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "cc52531a-0d4e-44e4-8f16-2aca77ecaa02" (UID: "cc52531a-0d4e-44e4-8f16-2aca77ecaa02"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:17:59 crc kubenswrapper[4701]: I1121 19:17:59.987841 4701 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cc52531a-0d4e-44e4-8f16-2aca77ecaa02-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:00 crc kubenswrapper[4701]: I1121 19:18:00.198047 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-notifications-server-0" event={"ID":"fa567817-ce17-4cb3-9e55-e14902a96420","Type":"ContainerStarted","Data":"c9a4925d1edeed0563739e550804f57f15ef77a43f535f1feebf4723ac7ba8c6"} Nov 21 19:18:00 crc kubenswrapper[4701]: I1121 19:18:00.203577 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02","Type":"ContainerStarted","Data":"22e1855bd9bfa3a229c0412c5188f4203d022bce21d3827cf5280bada0841afd"} Nov 21 19:18:00 crc kubenswrapper[4701]: I1121 19:18:00.210530 4701 generic.go:334] "Generic (PLEG): container finished" podID="5a12e1a0-73d2-4cfe-96d2-d311378a6e3a" containerID="13c69af73622b2f5c78c9510af85204af7e96e80218d27b521c54177190ea743" exitCode=0 Nov 21 19:18:00 crc kubenswrapper[4701]: I1121 19:18:00.210644 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76446f9f99-vgs97" event={"ID":"5a12e1a0-73d2-4cfe-96d2-d311378a6e3a","Type":"ContainerDied","Data":"13c69af73622b2f5c78c9510af85204af7e96e80218d27b521c54177190ea743"} Nov 21 19:18:00 crc kubenswrapper[4701]: I1121 19:18:00.213619 4701 generic.go:334] "Generic (PLEG): container finished" podID="c1552bca-042c-4d9e-ac6f-8c8f762ac494" containerID="87a89f691789d523c36ccebfdabc9fb8fd775243f06b279527bfd3af11e95710" exitCode=0 Nov 21 19:18:00 crc kubenswrapper[4701]: I1121 19:18:00.213707 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-vqwr8" event={"ID":"c1552bca-042c-4d9e-ac6f-8c8f762ac494","Type":"ContainerDied","Data":"87a89f691789d523c36ccebfdabc9fb8fd775243f06b279527bfd3af11e95710"} Nov 21 19:18:00 crc kubenswrapper[4701]: I1121 19:18:00.218423 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6d7d677589-c9bgz" Nov 21 19:18:00 crc kubenswrapper[4701]: I1121 19:18:00.219757 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3","Type":"ContainerStarted","Data":"e4570b453a88dbab2e9d7e0002bd8c806baddbf864333070457a2d3f8f6d6688"} Nov 21 19:18:00 crc kubenswrapper[4701]: I1121 19:18:00.354275 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d7d677589-c9bgz"] Nov 21 19:18:00 crc kubenswrapper[4701]: I1121 19:18:00.368778 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6d7d677589-c9bgz"] Nov 21 19:18:00 crc kubenswrapper[4701]: I1121 19:18:00.398670 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/bf8d5d78-fa29-41ff-94e0-6249f7e02e1b-etc-swift\") pod \"swift-storage-0\" (UID: \"bf8d5d78-fa29-41ff-94e0-6249f7e02e1b\") " pod="openstack/swift-storage-0" Nov 21 19:18:00 crc kubenswrapper[4701]: E1121 19:18:00.402459 4701 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 21 19:18:00 crc kubenswrapper[4701]: E1121 19:18:00.402766 4701 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 21 19:18:00 crc kubenswrapper[4701]: E1121 19:18:00.402919 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/bf8d5d78-fa29-41ff-94e0-6249f7e02e1b-etc-swift podName:bf8d5d78-fa29-41ff-94e0-6249f7e02e1b nodeName:}" failed. No retries permitted until 2025-11-21 19:18:02.40288942 +0000 UTC m=+973.188029447 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/bf8d5d78-fa29-41ff-94e0-6249f7e02e1b-etc-swift") pod "swift-storage-0" (UID: "bf8d5d78-fa29-41ff-94e0-6249f7e02e1b") : configmap "swift-ring-files" not found Nov 21 19:18:01 crc kubenswrapper[4701]: I1121 19:18:01.231769 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76446f9f99-vgs97" event={"ID":"5a12e1a0-73d2-4cfe-96d2-d311378a6e3a","Type":"ContainerStarted","Data":"72da32b1f0872c1efdfc3f478c0380c5b3bef39e4882b0c98ccf0a58efce1e19"} Nov 21 19:18:01 crc kubenswrapper[4701]: I1121 19:18:01.232791 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-76446f9f99-vgs97" Nov 21 19:18:01 crc kubenswrapper[4701]: I1121 19:18:01.235470 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"79fa8bdc-6516-4a53-8e96-17a297ac82b7","Type":"ContainerStarted","Data":"f0ca2e7b3ac945a8838678f89fec58d115b4447f42315ce05d64579930fba011"} Nov 21 19:18:01 crc kubenswrapper[4701]: I1121 19:18:01.240218 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-vqwr8" event={"ID":"c1552bca-042c-4d9e-ac6f-8c8f762ac494","Type":"ContainerStarted","Data":"d84ad3496cf069f4d16e380450fa811e3ebc89e694e8cb6080d0a3d2bf854a88"} Nov 21 19:18:01 crc kubenswrapper[4701]: I1121 19:18:01.258018 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-76446f9f99-vgs97" podStartSLOduration=4.257994412 podStartE2EDuration="4.257994412s" podCreationTimestamp="2025-11-21 19:17:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:18:01.251233122 +0000 UTC m=+972.036373149" watchObservedRunningTime="2025-11-21 19:18:01.257994412 +0000 UTC m=+972.043134439" Nov 21 19:18:01 crc kubenswrapper[4701]: I1121 19:18:01.965230 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc52531a-0d4e-44e4-8f16-2aca77ecaa02" path="/var/lib/kubelet/pods/cc52531a-0d4e-44e4-8f16-2aca77ecaa02/volumes" Nov 21 19:18:02 crc kubenswrapper[4701]: I1121 19:18:02.437988 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/bf8d5d78-fa29-41ff-94e0-6249f7e02e1b-etc-swift\") pod \"swift-storage-0\" (UID: \"bf8d5d78-fa29-41ff-94e0-6249f7e02e1b\") " pod="openstack/swift-storage-0" Nov 21 19:18:02 crc kubenswrapper[4701]: E1121 19:18:02.438318 4701 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 21 19:18:02 crc kubenswrapper[4701]: E1121 19:18:02.438905 4701 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 21 19:18:02 crc kubenswrapper[4701]: E1121 19:18:02.439007 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/bf8d5d78-fa29-41ff-94e0-6249f7e02e1b-etc-swift podName:bf8d5d78-fa29-41ff-94e0-6249f7e02e1b nodeName:}" failed. No retries permitted until 2025-11-21 19:18:06.438974925 +0000 UTC m=+977.224114982 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/bf8d5d78-fa29-41ff-94e0-6249f7e02e1b-etc-swift") pod "swift-storage-0" (UID: "bf8d5d78-fa29-41ff-94e0-6249f7e02e1b") : configmap "swift-ring-files" not found Nov 21 19:18:02 crc kubenswrapper[4701]: I1121 19:18:02.579011 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-qnhkr"] Nov 21 19:18:02 crc kubenswrapper[4701]: E1121 19:18:02.579406 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc52531a-0d4e-44e4-8f16-2aca77ecaa02" containerName="dnsmasq-dns" Nov 21 19:18:02 crc kubenswrapper[4701]: I1121 19:18:02.579424 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc52531a-0d4e-44e4-8f16-2aca77ecaa02" containerName="dnsmasq-dns" Nov 21 19:18:02 crc kubenswrapper[4701]: E1121 19:18:02.579453 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc52531a-0d4e-44e4-8f16-2aca77ecaa02" containerName="init" Nov 21 19:18:02 crc kubenswrapper[4701]: I1121 19:18:02.579460 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc52531a-0d4e-44e4-8f16-2aca77ecaa02" containerName="init" Nov 21 19:18:02 crc kubenswrapper[4701]: I1121 19:18:02.579613 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc52531a-0d4e-44e4-8f16-2aca77ecaa02" containerName="dnsmasq-dns" Nov 21 19:18:02 crc kubenswrapper[4701]: I1121 19:18:02.580295 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-qnhkr" Nov 21 19:18:02 crc kubenswrapper[4701]: I1121 19:18:02.588148 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 21 19:18:02 crc kubenswrapper[4701]: I1121 19:18:02.588608 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Nov 21 19:18:02 crc kubenswrapper[4701]: I1121 19:18:02.590314 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Nov 21 19:18:02 crc kubenswrapper[4701]: I1121 19:18:02.598804 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-qnhkr"] Nov 21 19:18:02 crc kubenswrapper[4701]: I1121 19:18:02.643082 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-dispersionconf\") pod \"swift-ring-rebalance-qnhkr\" (UID: \"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a\") " pod="openstack/swift-ring-rebalance-qnhkr" Nov 21 19:18:02 crc kubenswrapper[4701]: I1121 19:18:02.643136 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-etc-swift\") pod \"swift-ring-rebalance-qnhkr\" (UID: \"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a\") " pod="openstack/swift-ring-rebalance-qnhkr" Nov 21 19:18:02 crc kubenswrapper[4701]: I1121 19:18:02.643182 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zdbss\" (UniqueName: \"kubernetes.io/projected/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-kube-api-access-zdbss\") pod \"swift-ring-rebalance-qnhkr\" (UID: \"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a\") " pod="openstack/swift-ring-rebalance-qnhkr" Nov 21 19:18:02 crc kubenswrapper[4701]: I1121 19:18:02.643260 4701 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-scripts\") pod \"swift-ring-rebalance-qnhkr\" (UID: \"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a\") " pod="openstack/swift-ring-rebalance-qnhkr" Nov 21 19:18:02 crc kubenswrapper[4701]: I1121 19:18:02.643280 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-combined-ca-bundle\") pod \"swift-ring-rebalance-qnhkr\" (UID: \"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a\") " pod="openstack/swift-ring-rebalance-qnhkr" Nov 21 19:18:02 crc kubenswrapper[4701]: I1121 19:18:02.643311 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-swiftconf\") pod \"swift-ring-rebalance-qnhkr\" (UID: \"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a\") " pod="openstack/swift-ring-rebalance-qnhkr" Nov 21 19:18:02 crc kubenswrapper[4701]: I1121 19:18:02.643336 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-ring-data-devices\") pod \"swift-ring-rebalance-qnhkr\" (UID: \"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a\") " pod="openstack/swift-ring-rebalance-qnhkr" Nov 21 19:18:02 crc kubenswrapper[4701]: I1121 19:18:02.745635 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-dispersionconf\") pod \"swift-ring-rebalance-qnhkr\" (UID: \"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a\") " pod="openstack/swift-ring-rebalance-qnhkr" Nov 21 19:18:02 crc kubenswrapper[4701]: I1121 19:18:02.745702 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-etc-swift\") pod \"swift-ring-rebalance-qnhkr\" (UID: \"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a\") " pod="openstack/swift-ring-rebalance-qnhkr" Nov 21 19:18:02 crc kubenswrapper[4701]: I1121 19:18:02.745764 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zdbss\" (UniqueName: \"kubernetes.io/projected/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-kube-api-access-zdbss\") pod \"swift-ring-rebalance-qnhkr\" (UID: \"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a\") " pod="openstack/swift-ring-rebalance-qnhkr" Nov 21 19:18:02 crc kubenswrapper[4701]: I1121 19:18:02.745852 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-scripts\") pod \"swift-ring-rebalance-qnhkr\" (UID: \"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a\") " pod="openstack/swift-ring-rebalance-qnhkr" Nov 21 19:18:02 crc kubenswrapper[4701]: I1121 19:18:02.745883 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-combined-ca-bundle\") pod \"swift-ring-rebalance-qnhkr\" (UID: \"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a\") " pod="openstack/swift-ring-rebalance-qnhkr" Nov 21 19:18:02 crc kubenswrapper[4701]: I1121 19:18:02.745923 4701 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-swiftconf\") pod \"swift-ring-rebalance-qnhkr\" (UID: \"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a\") " pod="openstack/swift-ring-rebalance-qnhkr" Nov 21 19:18:02 crc kubenswrapper[4701]: I1121 19:18:02.745953 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-ring-data-devices\") pod \"swift-ring-rebalance-qnhkr\" (UID: \"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a\") " pod="openstack/swift-ring-rebalance-qnhkr" Nov 21 19:18:02 crc kubenswrapper[4701]: I1121 19:18:02.747055 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-ring-data-devices\") pod \"swift-ring-rebalance-qnhkr\" (UID: \"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a\") " pod="openstack/swift-ring-rebalance-qnhkr" Nov 21 19:18:02 crc kubenswrapper[4701]: I1121 19:18:02.747794 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-scripts\") pod \"swift-ring-rebalance-qnhkr\" (UID: \"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a\") " pod="openstack/swift-ring-rebalance-qnhkr" Nov 21 19:18:02 crc kubenswrapper[4701]: I1121 19:18:02.748443 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-etc-swift\") pod \"swift-ring-rebalance-qnhkr\" (UID: \"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a\") " pod="openstack/swift-ring-rebalance-qnhkr" Nov 21 19:18:02 crc kubenswrapper[4701]: I1121 19:18:02.753523 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-dispersionconf\") pod \"swift-ring-rebalance-qnhkr\" (UID: \"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a\") " pod="openstack/swift-ring-rebalance-qnhkr" Nov 21 19:18:02 crc kubenswrapper[4701]: I1121 19:18:02.754821 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-combined-ca-bundle\") pod \"swift-ring-rebalance-qnhkr\" (UID: \"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a\") " pod="openstack/swift-ring-rebalance-qnhkr" Nov 21 19:18:02 crc kubenswrapper[4701]: I1121 19:18:02.757524 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-swiftconf\") pod \"swift-ring-rebalance-qnhkr\" (UID: \"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a\") " pod="openstack/swift-ring-rebalance-qnhkr" Nov 21 19:18:02 crc kubenswrapper[4701]: I1121 19:18:02.765703 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zdbss\" (UniqueName: \"kubernetes.io/projected/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-kube-api-access-zdbss\") pod \"swift-ring-rebalance-qnhkr\" (UID: \"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a\") " pod="openstack/swift-ring-rebalance-qnhkr" Nov 21 19:18:02 crc kubenswrapper[4701]: I1121 19:18:02.897882 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-qnhkr" Nov 21 19:18:03 crc kubenswrapper[4701]: I1121 19:18:03.262124 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-vqwr8" event={"ID":"c1552bca-042c-4d9e-ac6f-8c8f762ac494","Type":"ContainerStarted","Data":"1f395e4870e675172b5cda83b672aaad537424058dea1ee7966a8c56e20718c6"} Nov 21 19:18:03 crc kubenswrapper[4701]: I1121 19:18:03.262673 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-vqwr8" Nov 21 19:18:03 crc kubenswrapper[4701]: I1121 19:18:03.262701 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-vqwr8" Nov 21 19:18:03 crc kubenswrapper[4701]: I1121 19:18:03.265928 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"fae8c560-c6a6-453e-8c64-9dca8183e5c0","Type":"ContainerStarted","Data":"7194b1d958b8c16f50c7e3ceb22b6be3f525886b2e617ab9f676474be33b1483"} Nov 21 19:18:03 crc kubenswrapper[4701]: I1121 19:18:03.268273 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"76796a80-e8f7-43ed-862b-011b964a31f9","Type":"ContainerStarted","Data":"c45a4ffce2493c9d8e0969ca0e546bced0b62f261eaf051b4d10c59fab9ccafe"} Nov 21 19:18:03 crc kubenswrapper[4701]: I1121 19:18:03.298988 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-vqwr8" podStartSLOduration=15.354432617 podStartE2EDuration="22.298948338s" podCreationTimestamp="2025-11-21 19:17:41 +0000 UTC" firstStartedPulling="2025-11-21 19:17:50.517430852 +0000 UTC m=+961.302570879" lastFinishedPulling="2025-11-21 19:17:57.461946573 +0000 UTC m=+968.247086600" observedRunningTime="2025-11-21 19:18:03.28370836 +0000 UTC m=+974.068848407" watchObservedRunningTime="2025-11-21 19:18:03.298948338 +0000 UTC m=+974.084088375" Nov 21 19:18:03 crc kubenswrapper[4701]: I1121 19:18:03.323988 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=7.009522581 podStartE2EDuration="19.323958557s" podCreationTimestamp="2025-11-21 19:17:44 +0000 UTC" firstStartedPulling="2025-11-21 19:17:50.566393093 +0000 UTC m=+961.351533120" lastFinishedPulling="2025-11-21 19:18:02.880829069 +0000 UTC m=+973.665969096" observedRunningTime="2025-11-21 19:18:03.321664016 +0000 UTC m=+974.106804063" watchObservedRunningTime="2025-11-21 19:18:03.323958557 +0000 UTC m=+974.109098584" Nov 21 19:18:03 crc kubenswrapper[4701]: I1121 19:18:03.351518 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=12.775827775 podStartE2EDuration="24.351495953s" podCreationTimestamp="2025-11-21 19:17:39 +0000 UTC" firstStartedPulling="2025-11-21 19:17:51.290280863 +0000 UTC m=+962.075420890" lastFinishedPulling="2025-11-21 19:18:02.865949041 +0000 UTC m=+973.651089068" observedRunningTime="2025-11-21 19:18:03.344368763 +0000 UTC m=+974.129508790" watchObservedRunningTime="2025-11-21 19:18:03.351495953 +0000 UTC m=+974.136635980" Nov 21 19:18:03 crc kubenswrapper[4701]: I1121 19:18:03.439853 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-qnhkr"] Nov 21 19:18:03 crc kubenswrapper[4701]: W1121 19:18:03.444455 4701 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6e6a9aaa_b68d_4490_b0f3_975c6ace3d1a.slice/crio-f4c852c580319acbb4a5f59bf72920746f39ae0135550a9cbce8701b1d4dff62 WatchSource:0}: Error finding container f4c852c580319acbb4a5f59bf72920746f39ae0135550a9cbce8701b1d4dff62: Status 404 returned error can't find the container with id f4c852c580319acbb4a5f59bf72920746f39ae0135550a9cbce8701b1d4dff62 Nov 21 19:18:03 crc kubenswrapper[4701]: I1121 19:18:03.808867 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 21 19:18:03 crc kubenswrapper[4701]: I1121 19:18:03.858266 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 21 19:18:04 crc kubenswrapper[4701]: I1121 19:18:04.289420 4701 generic.go:334] "Generic (PLEG): container finished" podID="b6432247-ed58-4dce-98d4-4267d0122151" containerID="559d30179c51ba58890b6b7d53d5ea2d15e4450b68224206b02193a8506cd254" exitCode=0 Nov 21 19:18:04 crc kubenswrapper[4701]: I1121 19:18:04.289503 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"b6432247-ed58-4dce-98d4-4267d0122151","Type":"ContainerDied","Data":"559d30179c51ba58890b6b7d53d5ea2d15e4450b68224206b02193a8506cd254"} Nov 21 19:18:04 crc kubenswrapper[4701]: I1121 19:18:04.291354 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-qnhkr" event={"ID":"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a","Type":"ContainerStarted","Data":"f4c852c580319acbb4a5f59bf72920746f39ae0135550a9cbce8701b1d4dff62"} Nov 21 19:18:04 crc kubenswrapper[4701]: I1121 19:18:04.293855 4701 generic.go:334] "Generic (PLEG): container finished" podID="117bcee4-5190-4738-8e03-19f77f4fb428" containerID="72bb6c0b638d467678343a87f1a83e5a266f193fa8c192110931c51d258e5550" exitCode=0 Nov 21 19:18:04 crc kubenswrapper[4701]: I1121 19:18:04.293995 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"117bcee4-5190-4738-8e03-19f77f4fb428","Type":"ContainerDied","Data":"72bb6c0b638d467678343a87f1a83e5a266f193fa8c192110931c51d258e5550"} Nov 21 19:18:04 crc kubenswrapper[4701]: I1121 19:18:04.294659 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 21 19:18:04 crc kubenswrapper[4701]: I1121 19:18:04.356726 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 21 19:18:04 crc kubenswrapper[4701]: I1121 19:18:04.642437 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6684cc9dc7-878nj"] Nov 21 19:18:04 crc kubenswrapper[4701]: I1121 19:18:04.642702 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6684cc9dc7-878nj" podUID="a6170b01-4aa3-4d90-a317-764721c7e08c" containerName="dnsmasq-dns" containerID="cri-o://67110877bcc06f9e505a88b5866543854f2e2588b4bfe0441ac53f13771a8142" gracePeriod=10 Nov 21 19:18:04 crc kubenswrapper[4701]: I1121 19:18:04.645030 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6684cc9dc7-878nj" Nov 21 19:18:04 crc kubenswrapper[4701]: I1121 19:18:04.684072 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7c89bf6b7c-2rjgn"] Nov 21 19:18:04 crc kubenswrapper[4701]: I1121 19:18:04.686066 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7c89bf6b7c-2rjgn" Nov 21 19:18:04 crc kubenswrapper[4701]: I1121 19:18:04.688318 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 21 19:18:04 crc kubenswrapper[4701]: I1121 19:18:04.721867 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c89bf6b7c-2rjgn"] Nov 21 19:18:04 crc kubenswrapper[4701]: I1121 19:18:04.799470 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-msx5f"] Nov 21 19:18:04 crc kubenswrapper[4701]: I1121 19:18:04.801492 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-msx5f" Nov 21 19:18:04 crc kubenswrapper[4701]: I1121 19:18:04.805330 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 21 19:18:04 crc kubenswrapper[4701]: I1121 19:18:04.811408 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2mdt9\" (UniqueName: \"kubernetes.io/projected/a044ec4b-a230-4337-a491-b48d8e6a03ec-kube-api-access-2mdt9\") pod \"dnsmasq-dns-7c89bf6b7c-2rjgn\" (UID: \"a044ec4b-a230-4337-a491-b48d8e6a03ec\") " pod="openstack/dnsmasq-dns-7c89bf6b7c-2rjgn" Nov 21 19:18:04 crc kubenswrapper[4701]: I1121 19:18:04.811533 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a044ec4b-a230-4337-a491-b48d8e6a03ec-dns-svc\") pod \"dnsmasq-dns-7c89bf6b7c-2rjgn\" (UID: \"a044ec4b-a230-4337-a491-b48d8e6a03ec\") " pod="openstack/dnsmasq-dns-7c89bf6b7c-2rjgn" Nov 21 19:18:04 crc kubenswrapper[4701]: I1121 19:18:04.811580 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a044ec4b-a230-4337-a491-b48d8e6a03ec-config\") pod \"dnsmasq-dns-7c89bf6b7c-2rjgn\" (UID: \"a044ec4b-a230-4337-a491-b48d8e6a03ec\") " pod="openstack/dnsmasq-dns-7c89bf6b7c-2rjgn" Nov 21 19:18:04 crc kubenswrapper[4701]: I1121 19:18:04.811597 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a044ec4b-a230-4337-a491-b48d8e6a03ec-ovsdbserver-sb\") pod \"dnsmasq-dns-7c89bf6b7c-2rjgn\" (UID: \"a044ec4b-a230-4337-a491-b48d8e6a03ec\") " pod="openstack/dnsmasq-dns-7c89bf6b7c-2rjgn" Nov 21 19:18:04 crc kubenswrapper[4701]: I1121 19:18:04.818393 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-msx5f"] Nov 21 19:18:04 crc kubenswrapper[4701]: I1121 19:18:04.913706 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81c7a23e-51f0-4360-820b-6f4f7b7daa63-config\") pod \"ovn-controller-metrics-msx5f\" (UID: \"81c7a23e-51f0-4360-820b-6f4f7b7daa63\") " pod="openstack/ovn-controller-metrics-msx5f" Nov 21 19:18:04 crc kubenswrapper[4701]: I1121 19:18:04.913772 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4jsp5\" (UniqueName: \"kubernetes.io/projected/81c7a23e-51f0-4360-820b-6f4f7b7daa63-kube-api-access-4jsp5\") pod \"ovn-controller-metrics-msx5f\" (UID: \"81c7a23e-51f0-4360-820b-6f4f7b7daa63\") " pod="openstack/ovn-controller-metrics-msx5f" Nov 21 19:18:04 crc kubenswrapper[4701]: 
I1121 19:18:04.913810 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a044ec4b-a230-4337-a491-b48d8e6a03ec-config\") pod \"dnsmasq-dns-7c89bf6b7c-2rjgn\" (UID: \"a044ec4b-a230-4337-a491-b48d8e6a03ec\") " pod="openstack/dnsmasq-dns-7c89bf6b7c-2rjgn" Nov 21 19:18:04 crc kubenswrapper[4701]: I1121 19:18:04.913983 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a044ec4b-a230-4337-a491-b48d8e6a03ec-ovsdbserver-sb\") pod \"dnsmasq-dns-7c89bf6b7c-2rjgn\" (UID: \"a044ec4b-a230-4337-a491-b48d8e6a03ec\") " pod="openstack/dnsmasq-dns-7c89bf6b7c-2rjgn" Nov 21 19:18:04 crc kubenswrapper[4701]: I1121 19:18:04.914221 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/81c7a23e-51f0-4360-820b-6f4f7b7daa63-ovs-rundir\") pod \"ovn-controller-metrics-msx5f\" (UID: \"81c7a23e-51f0-4360-820b-6f4f7b7daa63\") " pod="openstack/ovn-controller-metrics-msx5f" Nov 21 19:18:04 crc kubenswrapper[4701]: I1121 19:18:04.914490 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/81c7a23e-51f0-4360-820b-6f4f7b7daa63-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-msx5f\" (UID: \"81c7a23e-51f0-4360-820b-6f4f7b7daa63\") " pod="openstack/ovn-controller-metrics-msx5f" Nov 21 19:18:04 crc kubenswrapper[4701]: I1121 19:18:04.914576 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2mdt9\" (UniqueName: \"kubernetes.io/projected/a044ec4b-a230-4337-a491-b48d8e6a03ec-kube-api-access-2mdt9\") pod \"dnsmasq-dns-7c89bf6b7c-2rjgn\" (UID: \"a044ec4b-a230-4337-a491-b48d8e6a03ec\") " pod="openstack/dnsmasq-dns-7c89bf6b7c-2rjgn" Nov 21 19:18:04 crc kubenswrapper[4701]: I1121 19:18:04.914721 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81c7a23e-51f0-4360-820b-6f4f7b7daa63-combined-ca-bundle\") pod \"ovn-controller-metrics-msx5f\" (UID: \"81c7a23e-51f0-4360-820b-6f4f7b7daa63\") " pod="openstack/ovn-controller-metrics-msx5f" Nov 21 19:18:04 crc kubenswrapper[4701]: I1121 19:18:04.914727 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a044ec4b-a230-4337-a491-b48d8e6a03ec-config\") pod \"dnsmasq-dns-7c89bf6b7c-2rjgn\" (UID: \"a044ec4b-a230-4337-a491-b48d8e6a03ec\") " pod="openstack/dnsmasq-dns-7c89bf6b7c-2rjgn" Nov 21 19:18:04 crc kubenswrapper[4701]: I1121 19:18:04.914780 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/81c7a23e-51f0-4360-820b-6f4f7b7daa63-ovn-rundir\") pod \"ovn-controller-metrics-msx5f\" (UID: \"81c7a23e-51f0-4360-820b-6f4f7b7daa63\") " pod="openstack/ovn-controller-metrics-msx5f" Nov 21 19:18:04 crc kubenswrapper[4701]: I1121 19:18:04.914989 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a044ec4b-a230-4337-a491-b48d8e6a03ec-ovsdbserver-sb\") pod \"dnsmasq-dns-7c89bf6b7c-2rjgn\" (UID: \"a044ec4b-a230-4337-a491-b48d8e6a03ec\") " pod="openstack/dnsmasq-dns-7c89bf6b7c-2rjgn" Nov 21 19:18:04 crc kubenswrapper[4701]: I1121 
19:18:04.915020 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a044ec4b-a230-4337-a491-b48d8e6a03ec-dns-svc\") pod \"dnsmasq-dns-7c89bf6b7c-2rjgn\" (UID: \"a044ec4b-a230-4337-a491-b48d8e6a03ec\") " pod="openstack/dnsmasq-dns-7c89bf6b7c-2rjgn" Nov 21 19:18:04 crc kubenswrapper[4701]: I1121 19:18:04.915696 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a044ec4b-a230-4337-a491-b48d8e6a03ec-dns-svc\") pod \"dnsmasq-dns-7c89bf6b7c-2rjgn\" (UID: \"a044ec4b-a230-4337-a491-b48d8e6a03ec\") " pod="openstack/dnsmasq-dns-7c89bf6b7c-2rjgn" Nov 21 19:18:04 crc kubenswrapper[4701]: I1121 19:18:04.939691 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2mdt9\" (UniqueName: \"kubernetes.io/projected/a044ec4b-a230-4337-a491-b48d8e6a03ec-kube-api-access-2mdt9\") pod \"dnsmasq-dns-7c89bf6b7c-2rjgn\" (UID: \"a044ec4b-a230-4337-a491-b48d8e6a03ec\") " pod="openstack/dnsmasq-dns-7c89bf6b7c-2rjgn" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.017883 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c89bf6b7c-2rjgn" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.018534 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/81c7a23e-51f0-4360-820b-6f4f7b7daa63-ovs-rundir\") pod \"ovn-controller-metrics-msx5f\" (UID: \"81c7a23e-51f0-4360-820b-6f4f7b7daa63\") " pod="openstack/ovn-controller-metrics-msx5f" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.018642 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/81c7a23e-51f0-4360-820b-6f4f7b7daa63-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-msx5f\" (UID: \"81c7a23e-51f0-4360-820b-6f4f7b7daa63\") " pod="openstack/ovn-controller-metrics-msx5f" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.018713 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81c7a23e-51f0-4360-820b-6f4f7b7daa63-combined-ca-bundle\") pod \"ovn-controller-metrics-msx5f\" (UID: \"81c7a23e-51f0-4360-820b-6f4f7b7daa63\") " pod="openstack/ovn-controller-metrics-msx5f" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.018736 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/81c7a23e-51f0-4360-820b-6f4f7b7daa63-ovn-rundir\") pod \"ovn-controller-metrics-msx5f\" (UID: \"81c7a23e-51f0-4360-820b-6f4f7b7daa63\") " pod="openstack/ovn-controller-metrics-msx5f" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.018809 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81c7a23e-51f0-4360-820b-6f4f7b7daa63-config\") pod \"ovn-controller-metrics-msx5f\" (UID: \"81c7a23e-51f0-4360-820b-6f4f7b7daa63\") " pod="openstack/ovn-controller-metrics-msx5f" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.018842 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4jsp5\" (UniqueName: \"kubernetes.io/projected/81c7a23e-51f0-4360-820b-6f4f7b7daa63-kube-api-access-4jsp5\") pod \"ovn-controller-metrics-msx5f\" (UID: 
\"81c7a23e-51f0-4360-820b-6f4f7b7daa63\") " pod="openstack/ovn-controller-metrics-msx5f" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.020338 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/81c7a23e-51f0-4360-820b-6f4f7b7daa63-ovs-rundir\") pod \"ovn-controller-metrics-msx5f\" (UID: \"81c7a23e-51f0-4360-820b-6f4f7b7daa63\") " pod="openstack/ovn-controller-metrics-msx5f" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.020445 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/81c7a23e-51f0-4360-820b-6f4f7b7daa63-ovn-rundir\") pod \"ovn-controller-metrics-msx5f\" (UID: \"81c7a23e-51f0-4360-820b-6f4f7b7daa63\") " pod="openstack/ovn-controller-metrics-msx5f" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.024844 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81c7a23e-51f0-4360-820b-6f4f7b7daa63-config\") pod \"ovn-controller-metrics-msx5f\" (UID: \"81c7a23e-51f0-4360-820b-6f4f7b7daa63\") " pod="openstack/ovn-controller-metrics-msx5f" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.037932 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/81c7a23e-51f0-4360-820b-6f4f7b7daa63-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-msx5f\" (UID: \"81c7a23e-51f0-4360-820b-6f4f7b7daa63\") " pod="openstack/ovn-controller-metrics-msx5f" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.038246 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81c7a23e-51f0-4360-820b-6f4f7b7daa63-combined-ca-bundle\") pod \"ovn-controller-metrics-msx5f\" (UID: \"81c7a23e-51f0-4360-820b-6f4f7b7daa63\") " pod="openstack/ovn-controller-metrics-msx5f" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.046279 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4jsp5\" (UniqueName: \"kubernetes.io/projected/81c7a23e-51f0-4360-820b-6f4f7b7daa63-kube-api-access-4jsp5\") pod \"ovn-controller-metrics-msx5f\" (UID: \"81c7a23e-51f0-4360-820b-6f4f7b7daa63\") " pod="openstack/ovn-controller-metrics-msx5f" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.127980 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-76446f9f99-vgs97"] Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.130151 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-76446f9f99-vgs97" podUID="5a12e1a0-73d2-4cfe-96d2-d311378a6e3a" containerName="dnsmasq-dns" containerID="cri-o://72da32b1f0872c1efdfc3f478c0380c5b3bef39e4882b0c98ccf0a58efce1e19" gracePeriod=10 Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.145277 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-msx5f" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.145473 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-76446f9f99-vgs97" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.173808 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.185764 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-59d9b597-bpqmn"] Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.187919 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59d9b597-bpqmn" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.195314 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.195376 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-59d9b597-bpqmn"] Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.244391 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.310602 4701 generic.go:334] "Generic (PLEG): container finished" podID="5a12e1a0-73d2-4cfe-96d2-d311378a6e3a" containerID="72da32b1f0872c1efdfc3f478c0380c5b3bef39e4882b0c98ccf0a58efce1e19" exitCode=0 Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.310702 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76446f9f99-vgs97" event={"ID":"5a12e1a0-73d2-4cfe-96d2-d311378a6e3a","Type":"ContainerDied","Data":"72da32b1f0872c1efdfc3f478c0380c5b3bef39e4882b0c98ccf0a58efce1e19"} Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.313745 4701 generic.go:334] "Generic (PLEG): container finished" podID="a6170b01-4aa3-4d90-a317-764721c7e08c" containerID="67110877bcc06f9e505a88b5866543854f2e2588b4bfe0441ac53f13771a8142" exitCode=0 Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.313795 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6684cc9dc7-878nj" event={"ID":"a6170b01-4aa3-4d90-a317-764721c7e08c","Type":"ContainerDied","Data":"67110877bcc06f9e505a88b5866543854f2e2588b4bfe0441ac53f13771a8142"} Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.314600 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.329177 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7b56d49b-38ad-4fea-9a4b-b6400d7edce9-dns-svc\") pod \"dnsmasq-dns-59d9b597-bpqmn\" (UID: \"7b56d49b-38ad-4fea-9a4b-b6400d7edce9\") " pod="openstack/dnsmasq-dns-59d9b597-bpqmn" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.329249 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b56d49b-38ad-4fea-9a4b-b6400d7edce9-config\") pod \"dnsmasq-dns-59d9b597-bpqmn\" (UID: \"7b56d49b-38ad-4fea-9a4b-b6400d7edce9\") " pod="openstack/dnsmasq-dns-59d9b597-bpqmn" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.329279 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/7b56d49b-38ad-4fea-9a4b-b6400d7edce9-ovsdbserver-sb\") pod \"dnsmasq-dns-59d9b597-bpqmn\" (UID: \"7b56d49b-38ad-4fea-9a4b-b6400d7edce9\") " pod="openstack/dnsmasq-dns-59d9b597-bpqmn" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.329378 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7b56d49b-38ad-4fea-9a4b-b6400d7edce9-ovsdbserver-nb\") pod \"dnsmasq-dns-59d9b597-bpqmn\" (UID: \"7b56d49b-38ad-4fea-9a4b-b6400d7edce9\") " pod="openstack/dnsmasq-dns-59d9b597-bpqmn" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.329419 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-znj5f\" (UniqueName: \"kubernetes.io/projected/7b56d49b-38ad-4fea-9a4b-b6400d7edce9-kube-api-access-znj5f\") pod \"dnsmasq-dns-59d9b597-bpqmn\" (UID: \"7b56d49b-38ad-4fea-9a4b-b6400d7edce9\") " pod="openstack/dnsmasq-dns-59d9b597-bpqmn" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.363745 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.432731 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-znj5f\" (UniqueName: \"kubernetes.io/projected/7b56d49b-38ad-4fea-9a4b-b6400d7edce9-kube-api-access-znj5f\") pod \"dnsmasq-dns-59d9b597-bpqmn\" (UID: \"7b56d49b-38ad-4fea-9a4b-b6400d7edce9\") " pod="openstack/dnsmasq-dns-59d9b597-bpqmn" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.433776 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7b56d49b-38ad-4fea-9a4b-b6400d7edce9-dns-svc\") pod \"dnsmasq-dns-59d9b597-bpqmn\" (UID: \"7b56d49b-38ad-4fea-9a4b-b6400d7edce9\") " pod="openstack/dnsmasq-dns-59d9b597-bpqmn" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.433931 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b56d49b-38ad-4fea-9a4b-b6400d7edce9-config\") pod \"dnsmasq-dns-59d9b597-bpqmn\" (UID: \"7b56d49b-38ad-4fea-9a4b-b6400d7edce9\") " pod="openstack/dnsmasq-dns-59d9b597-bpqmn" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.434077 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7b56d49b-38ad-4fea-9a4b-b6400d7edce9-ovsdbserver-sb\") pod \"dnsmasq-dns-59d9b597-bpqmn\" (UID: \"7b56d49b-38ad-4fea-9a4b-b6400d7edce9\") " pod="openstack/dnsmasq-dns-59d9b597-bpqmn" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.434365 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7b56d49b-38ad-4fea-9a4b-b6400d7edce9-ovsdbserver-nb\") pod \"dnsmasq-dns-59d9b597-bpqmn\" (UID: \"7b56d49b-38ad-4fea-9a4b-b6400d7edce9\") " pod="openstack/dnsmasq-dns-59d9b597-bpqmn" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.439807 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7b56d49b-38ad-4fea-9a4b-b6400d7edce9-ovsdbserver-nb\") pod \"dnsmasq-dns-59d9b597-bpqmn\" (UID: \"7b56d49b-38ad-4fea-9a4b-b6400d7edce9\") " pod="openstack/dnsmasq-dns-59d9b597-bpqmn" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.440833 4701 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7b56d49b-38ad-4fea-9a4b-b6400d7edce9-dns-svc\") pod \"dnsmasq-dns-59d9b597-bpqmn\" (UID: \"7b56d49b-38ad-4fea-9a4b-b6400d7edce9\") " pod="openstack/dnsmasq-dns-59d9b597-bpqmn" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.441102 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7b56d49b-38ad-4fea-9a4b-b6400d7edce9-ovsdbserver-sb\") pod \"dnsmasq-dns-59d9b597-bpqmn\" (UID: \"7b56d49b-38ad-4fea-9a4b-b6400d7edce9\") " pod="openstack/dnsmasq-dns-59d9b597-bpqmn" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.441556 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b56d49b-38ad-4fea-9a4b-b6400d7edce9-config\") pod \"dnsmasq-dns-59d9b597-bpqmn\" (UID: \"7b56d49b-38ad-4fea-9a4b-b6400d7edce9\") " pod="openstack/dnsmasq-dns-59d9b597-bpqmn" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.462985 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-znj5f\" (UniqueName: \"kubernetes.io/projected/7b56d49b-38ad-4fea-9a4b-b6400d7edce9-kube-api-access-znj5f\") pod \"dnsmasq-dns-59d9b597-bpqmn\" (UID: \"7b56d49b-38ad-4fea-9a4b-b6400d7edce9\") " pod="openstack/dnsmasq-dns-59d9b597-bpqmn" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.577082 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59d9b597-bpqmn" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.646025 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.670645 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.673444 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.674320 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-s5hn8" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.674644 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.686302 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.686726 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.747595 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b75b7e0-a14e-4889-9430-7cbb446d48d9-config\") pod \"ovn-northd-0\" (UID: \"4b75b7e0-a14e-4889-9430-7cbb446d48d9\") " pod="openstack/ovn-northd-0" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.747678 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b75b7e0-a14e-4889-9430-7cbb446d48d9-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"4b75b7e0-a14e-4889-9430-7cbb446d48d9\") " pod="openstack/ovn-northd-0" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.747704 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b75b7e0-a14e-4889-9430-7cbb446d48d9-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"4b75b7e0-a14e-4889-9430-7cbb446d48d9\") " pod="openstack/ovn-northd-0" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.747737 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4b75b7e0-a14e-4889-9430-7cbb446d48d9-scripts\") pod \"ovn-northd-0\" (UID: \"4b75b7e0-a14e-4889-9430-7cbb446d48d9\") " pod="openstack/ovn-northd-0" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.747816 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sh5wn\" (UniqueName: \"kubernetes.io/projected/4b75b7e0-a14e-4889-9430-7cbb446d48d9-kube-api-access-sh5wn\") pod \"ovn-northd-0\" (UID: \"4b75b7e0-a14e-4889-9430-7cbb446d48d9\") " pod="openstack/ovn-northd-0" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.747840 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4b75b7e0-a14e-4889-9430-7cbb446d48d9-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"4b75b7e0-a14e-4889-9430-7cbb446d48d9\") " pod="openstack/ovn-northd-0" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.747891 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b75b7e0-a14e-4889-9430-7cbb446d48d9-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"4b75b7e0-a14e-4889-9430-7cbb446d48d9\") " pod="openstack/ovn-northd-0" Nov 21 19:18:05 crc kubenswrapper[4701]: 
I1121 19:18:05.853153 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sh5wn\" (UniqueName: \"kubernetes.io/projected/4b75b7e0-a14e-4889-9430-7cbb446d48d9-kube-api-access-sh5wn\") pod \"ovn-northd-0\" (UID: \"4b75b7e0-a14e-4889-9430-7cbb446d48d9\") " pod="openstack/ovn-northd-0" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.853242 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4b75b7e0-a14e-4889-9430-7cbb446d48d9-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"4b75b7e0-a14e-4889-9430-7cbb446d48d9\") " pod="openstack/ovn-northd-0" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.853295 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b75b7e0-a14e-4889-9430-7cbb446d48d9-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"4b75b7e0-a14e-4889-9430-7cbb446d48d9\") " pod="openstack/ovn-northd-0" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.853361 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b75b7e0-a14e-4889-9430-7cbb446d48d9-config\") pod \"ovn-northd-0\" (UID: \"4b75b7e0-a14e-4889-9430-7cbb446d48d9\") " pod="openstack/ovn-northd-0" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.853401 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b75b7e0-a14e-4889-9430-7cbb446d48d9-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"4b75b7e0-a14e-4889-9430-7cbb446d48d9\") " pod="openstack/ovn-northd-0" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.853417 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b75b7e0-a14e-4889-9430-7cbb446d48d9-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"4b75b7e0-a14e-4889-9430-7cbb446d48d9\") " pod="openstack/ovn-northd-0" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.853442 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4b75b7e0-a14e-4889-9430-7cbb446d48d9-scripts\") pod \"ovn-northd-0\" (UID: \"4b75b7e0-a14e-4889-9430-7cbb446d48d9\") " pod="openstack/ovn-northd-0" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.854513 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4b75b7e0-a14e-4889-9430-7cbb446d48d9-scripts\") pod \"ovn-northd-0\" (UID: \"4b75b7e0-a14e-4889-9430-7cbb446d48d9\") " pod="openstack/ovn-northd-0" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.855221 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b75b7e0-a14e-4889-9430-7cbb446d48d9-config\") pod \"ovn-northd-0\" (UID: \"4b75b7e0-a14e-4889-9430-7cbb446d48d9\") " pod="openstack/ovn-northd-0" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.855334 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4b75b7e0-a14e-4889-9430-7cbb446d48d9-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"4b75b7e0-a14e-4889-9430-7cbb446d48d9\") " pod="openstack/ovn-northd-0" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.860941 4701 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b75b7e0-a14e-4889-9430-7cbb446d48d9-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"4b75b7e0-a14e-4889-9430-7cbb446d48d9\") " pod="openstack/ovn-northd-0" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.861140 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b75b7e0-a14e-4889-9430-7cbb446d48d9-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"4b75b7e0-a14e-4889-9430-7cbb446d48d9\") " pod="openstack/ovn-northd-0" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.861708 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b75b7e0-a14e-4889-9430-7cbb446d48d9-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"4b75b7e0-a14e-4889-9430-7cbb446d48d9\") " pod="openstack/ovn-northd-0" Nov 21 19:18:05 crc kubenswrapper[4701]: I1121 19:18:05.872787 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sh5wn\" (UniqueName: \"kubernetes.io/projected/4b75b7e0-a14e-4889-9430-7cbb446d48d9-kube-api-access-sh5wn\") pod \"ovn-northd-0\" (UID: \"4b75b7e0-a14e-4889-9430-7cbb446d48d9\") " pod="openstack/ovn-northd-0" Nov 21 19:18:06 crc kubenswrapper[4701]: I1121 19:18:06.001724 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 21 19:18:06 crc kubenswrapper[4701]: I1121 19:18:06.478042 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/bf8d5d78-fa29-41ff-94e0-6249f7e02e1b-etc-swift\") pod \"swift-storage-0\" (UID: \"bf8d5d78-fa29-41ff-94e0-6249f7e02e1b\") " pod="openstack/swift-storage-0" Nov 21 19:18:06 crc kubenswrapper[4701]: E1121 19:18:06.478358 4701 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 21 19:18:06 crc kubenswrapper[4701]: E1121 19:18:06.478589 4701 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 21 19:18:06 crc kubenswrapper[4701]: E1121 19:18:06.478678 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/bf8d5d78-fa29-41ff-94e0-6249f7e02e1b-etc-swift podName:bf8d5d78-fa29-41ff-94e0-6249f7e02e1b nodeName:}" failed. No retries permitted until 2025-11-21 19:18:14.478653815 +0000 UTC m=+985.263793842 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/bf8d5d78-fa29-41ff-94e0-6249f7e02e1b-etc-swift") pod "swift-storage-0" (UID: "bf8d5d78-fa29-41ff-94e0-6249f7e02e1b") : configmap "swift-ring-files" not found Nov 21 19:18:07 crc kubenswrapper[4701]: I1121 19:18:07.290781 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6684cc9dc7-878nj" Nov 21 19:18:07 crc kubenswrapper[4701]: I1121 19:18:07.308776 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a6170b01-4aa3-4d90-a317-764721c7e08c-dns-svc\") pod \"a6170b01-4aa3-4d90-a317-764721c7e08c\" (UID: \"a6170b01-4aa3-4d90-a317-764721c7e08c\") " Nov 21 19:18:07 crc kubenswrapper[4701]: I1121 19:18:07.308997 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6170b01-4aa3-4d90-a317-764721c7e08c-config\") pod \"a6170b01-4aa3-4d90-a317-764721c7e08c\" (UID: \"a6170b01-4aa3-4d90-a317-764721c7e08c\") " Nov 21 19:18:07 crc kubenswrapper[4701]: I1121 19:18:07.309105 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2sklw\" (UniqueName: \"kubernetes.io/projected/a6170b01-4aa3-4d90-a317-764721c7e08c-kube-api-access-2sklw\") pod \"a6170b01-4aa3-4d90-a317-764721c7e08c\" (UID: \"a6170b01-4aa3-4d90-a317-764721c7e08c\") " Nov 21 19:18:07 crc kubenswrapper[4701]: I1121 19:18:07.315616 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6170b01-4aa3-4d90-a317-764721c7e08c-kube-api-access-2sklw" (OuterVolumeSpecName: "kube-api-access-2sklw") pod "a6170b01-4aa3-4d90-a317-764721c7e08c" (UID: "a6170b01-4aa3-4d90-a317-764721c7e08c"). InnerVolumeSpecName "kube-api-access-2sklw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:18:07 crc kubenswrapper[4701]: I1121 19:18:07.380365 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6684cc9dc7-878nj" Nov 21 19:18:07 crc kubenswrapper[4701]: I1121 19:18:07.380365 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6684cc9dc7-878nj" event={"ID":"a6170b01-4aa3-4d90-a317-764721c7e08c","Type":"ContainerDied","Data":"99c24c2c1b7ad7d72bd16a8ba6c9aabcefbdb6ad68c1282484da485418a58641"} Nov 21 19:18:07 crc kubenswrapper[4701]: I1121 19:18:07.380511 4701 scope.go:117] "RemoveContainer" containerID="67110877bcc06f9e505a88b5866543854f2e2588b4bfe0441ac53f13771a8142" Nov 21 19:18:07 crc kubenswrapper[4701]: I1121 19:18:07.384027 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6170b01-4aa3-4d90-a317-764721c7e08c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a6170b01-4aa3-4d90-a317-764721c7e08c" (UID: "a6170b01-4aa3-4d90-a317-764721c7e08c"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:18:07 crc kubenswrapper[4701]: I1121 19:18:07.412749 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2sklw\" (UniqueName: \"kubernetes.io/projected/a6170b01-4aa3-4d90-a317-764721c7e08c-kube-api-access-2sklw\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:07 crc kubenswrapper[4701]: I1121 19:18:07.412787 4701 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a6170b01-4aa3-4d90-a317-764721c7e08c-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:07 crc kubenswrapper[4701]: I1121 19:18:07.432565 4701 scope.go:117] "RemoveContainer" containerID="79583820bc096aedf9c47c940c25ea0b48d1d7ee183082c7bef7d509305ff99e" Nov 21 19:18:07 crc kubenswrapper[4701]: I1121 19:18:07.494032 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6170b01-4aa3-4d90-a317-764721c7e08c-config" (OuterVolumeSpecName: "config") pod "a6170b01-4aa3-4d90-a317-764721c7e08c" (UID: "a6170b01-4aa3-4d90-a317-764721c7e08c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:18:07 crc kubenswrapper[4701]: I1121 19:18:07.518846 4701 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6170b01-4aa3-4d90-a317-764721c7e08c-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:07 crc kubenswrapper[4701]: I1121 19:18:07.766482 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6684cc9dc7-878nj"] Nov 21 19:18:07 crc kubenswrapper[4701]: I1121 19:18:07.775054 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6684cc9dc7-878nj"] Nov 21 19:18:07 crc kubenswrapper[4701]: I1121 19:18:07.965391 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a6170b01-4aa3-4d90-a317-764721c7e08c" path="/var/lib/kubelet/pods/a6170b01-4aa3-4d90-a317-764721c7e08c/volumes" Nov 21 19:18:08 crc kubenswrapper[4701]: I1121 19:18:08.015853 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c89bf6b7c-2rjgn"] Nov 21 19:18:08 crc kubenswrapper[4701]: I1121 19:18:08.027495 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-msx5f"] Nov 21 19:18:08 crc kubenswrapper[4701]: I1121 19:18:08.171353 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 21 19:18:08 crc kubenswrapper[4701]: I1121 19:18:08.220460 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-59d9b597-bpqmn"] Nov 21 19:18:08 crc kubenswrapper[4701]: I1121 19:18:08.392056 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"b6432247-ed58-4dce-98d4-4267d0122151","Type":"ContainerStarted","Data":"a01f85d77d19352f83fa264e67aacf7da146ddb3c2ea362bf82335daffa69964"} Nov 21 19:18:08 crc kubenswrapper[4701]: I1121 19:18:08.397226 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"117bcee4-5190-4738-8e03-19f77f4fb428","Type":"ContainerStarted","Data":"8c07b45f6029eadc40f90c068376007fa64fa41eac0eacc6f4627b8728bf1523"} Nov 21 19:18:09 crc kubenswrapper[4701]: I1121 19:18:09.464829 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=30.498978241 podStartE2EDuration="37.464806692s" podCreationTimestamp="2025-11-21 19:17:32 
+0000 UTC" firstStartedPulling="2025-11-21 19:17:50.460520259 +0000 UTC m=+961.245660286" lastFinishedPulling="2025-11-21 19:17:57.42634871 +0000 UTC m=+968.211488737" observedRunningTime="2025-11-21 19:18:09.454172578 +0000 UTC m=+980.239312645" watchObservedRunningTime="2025-11-21 19:18:09.464806692 +0000 UTC m=+980.249946729" Nov 21 19:18:10 crc kubenswrapper[4701]: I1121 19:18:10.667125 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6684cc9dc7-878nj" podUID="a6170b01-4aa3-4d90-a317-764721c7e08c" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.107:5353: i/o timeout" Nov 21 19:18:11 crc kubenswrapper[4701]: I1121 19:18:11.493026 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=29.795509555 podStartE2EDuration="38.492958964s" podCreationTimestamp="2025-11-21 19:17:33 +0000 UTC" firstStartedPulling="2025-11-21 19:17:48.918716101 +0000 UTC m=+959.703856128" lastFinishedPulling="2025-11-21 19:17:57.61616552 +0000 UTC m=+968.401305537" observedRunningTime="2025-11-21 19:18:11.488159196 +0000 UTC m=+982.273299303" watchObservedRunningTime="2025-11-21 19:18:11.492958964 +0000 UTC m=+982.278099021" Nov 21 19:18:11 crc kubenswrapper[4701]: W1121 19:18:11.826533 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4b75b7e0_a14e_4889_9430_7cbb446d48d9.slice/crio-e5dc08b603221185c45772bf8408bb97fa22741407daaade7bf6e995b991633b WatchSource:0}: Error finding container e5dc08b603221185c45772bf8408bb97fa22741407daaade7bf6e995b991633b: Status 404 returned error can't find the container with id e5dc08b603221185c45772bf8408bb97fa22741407daaade7bf6e995b991633b Nov 21 19:18:11 crc kubenswrapper[4701]: W1121 19:18:11.828934 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7b56d49b_38ad_4fea_9a4b_b6400d7edce9.slice/crio-3ece02819083326ef3828a84ab469d1885894b15842acc9959b124f3ac26dfcf WatchSource:0}: Error finding container 3ece02819083326ef3828a84ab469d1885894b15842acc9959b124f3ac26dfcf: Status 404 returned error can't find the container with id 3ece02819083326ef3828a84ab469d1885894b15842acc9959b124f3ac26dfcf Nov 21 19:18:11 crc kubenswrapper[4701]: I1121 19:18:11.906471 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-76446f9f99-vgs97" Nov 21 19:18:12 crc kubenswrapper[4701]: I1121 19:18:12.030922 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5a12e1a0-73d2-4cfe-96d2-d311378a6e3a-dns-svc\") pod \"5a12e1a0-73d2-4cfe-96d2-d311378a6e3a\" (UID: \"5a12e1a0-73d2-4cfe-96d2-d311378a6e3a\") " Nov 21 19:18:12 crc kubenswrapper[4701]: I1121 19:18:12.030971 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hghxt\" (UniqueName: \"kubernetes.io/projected/5a12e1a0-73d2-4cfe-96d2-d311378a6e3a-kube-api-access-hghxt\") pod \"5a12e1a0-73d2-4cfe-96d2-d311378a6e3a\" (UID: \"5a12e1a0-73d2-4cfe-96d2-d311378a6e3a\") " Nov 21 19:18:12 crc kubenswrapper[4701]: I1121 19:18:12.031016 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5a12e1a0-73d2-4cfe-96d2-d311378a6e3a-config\") pod \"5a12e1a0-73d2-4cfe-96d2-d311378a6e3a\" (UID: \"5a12e1a0-73d2-4cfe-96d2-d311378a6e3a\") " Nov 21 19:18:12 crc kubenswrapper[4701]: I1121 19:18:12.040586 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a12e1a0-73d2-4cfe-96d2-d311378a6e3a-kube-api-access-hghxt" (OuterVolumeSpecName: "kube-api-access-hghxt") pod "5a12e1a0-73d2-4cfe-96d2-d311378a6e3a" (UID: "5a12e1a0-73d2-4cfe-96d2-d311378a6e3a"). InnerVolumeSpecName "kube-api-access-hghxt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:18:12 crc kubenswrapper[4701]: I1121 19:18:12.087618 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5a12e1a0-73d2-4cfe-96d2-d311378a6e3a-config" (OuterVolumeSpecName: "config") pod "5a12e1a0-73d2-4cfe-96d2-d311378a6e3a" (UID: "5a12e1a0-73d2-4cfe-96d2-d311378a6e3a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:18:12 crc kubenswrapper[4701]: I1121 19:18:12.103860 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5a12e1a0-73d2-4cfe-96d2-d311378a6e3a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "5a12e1a0-73d2-4cfe-96d2-d311378a6e3a" (UID: "5a12e1a0-73d2-4cfe-96d2-d311378a6e3a"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:18:12 crc kubenswrapper[4701]: I1121 19:18:12.135659 4701 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5a12e1a0-73d2-4cfe-96d2-d311378a6e3a-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:12 crc kubenswrapper[4701]: I1121 19:18:12.135704 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hghxt\" (UniqueName: \"kubernetes.io/projected/5a12e1a0-73d2-4cfe-96d2-d311378a6e3a-kube-api-access-hghxt\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:12 crc kubenswrapper[4701]: I1121 19:18:12.135721 4701 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5a12e1a0-73d2-4cfe-96d2-d311378a6e3a-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:12 crc kubenswrapper[4701]: I1121 19:18:12.467409 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-msx5f" event={"ID":"81c7a23e-51f0-4360-820b-6f4f7b7daa63","Type":"ContainerStarted","Data":"eddfbd4dfd85b4b7546e5597e84a2a3a41877f1eb0383ddbf3025acad8222ff3"} Nov 21 19:18:12 crc kubenswrapper[4701]: I1121 19:18:12.469471 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c89bf6b7c-2rjgn" event={"ID":"a044ec4b-a230-4337-a491-b48d8e6a03ec","Type":"ContainerStarted","Data":"c1d535f93bcb32d973bf46596cf37f2118ce06b1686236b6fc6ea53a45aacb1f"} Nov 21 19:18:12 crc kubenswrapper[4701]: I1121 19:18:12.471516 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59d9b597-bpqmn" event={"ID":"7b56d49b-38ad-4fea-9a4b-b6400d7edce9","Type":"ContainerStarted","Data":"3ece02819083326ef3828a84ab469d1885894b15842acc9959b124f3ac26dfcf"} Nov 21 19:18:12 crc kubenswrapper[4701]: I1121 19:18:12.476603 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76446f9f99-vgs97" event={"ID":"5a12e1a0-73d2-4cfe-96d2-d311378a6e3a","Type":"ContainerDied","Data":"e9faca7d795029e0eb6f3006962bf2100844d101676b1386ec3c3eb9c57eedc4"} Nov 21 19:18:12 crc kubenswrapper[4701]: I1121 19:18:12.476685 4701 scope.go:117] "RemoveContainer" containerID="72da32b1f0872c1efdfc3f478c0380c5b3bef39e4882b0c98ccf0a58efce1e19" Nov 21 19:18:12 crc kubenswrapper[4701]: I1121 19:18:12.476689 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-76446f9f99-vgs97" Nov 21 19:18:12 crc kubenswrapper[4701]: I1121 19:18:12.478948 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"4b75b7e0-a14e-4889-9430-7cbb446d48d9","Type":"ContainerStarted","Data":"e5dc08b603221185c45772bf8408bb97fa22741407daaade7bf6e995b991633b"} Nov 21 19:18:12 crc kubenswrapper[4701]: I1121 19:18:12.482008 4701 generic.go:334] "Generic (PLEG): container finished" podID="79fa8bdc-6516-4a53-8e96-17a297ac82b7" containerID="f0ca2e7b3ac945a8838678f89fec58d115b4447f42315ce05d64579930fba011" exitCode=0 Nov 21 19:18:12 crc kubenswrapper[4701]: I1121 19:18:12.482072 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"79fa8bdc-6516-4a53-8e96-17a297ac82b7","Type":"ContainerDied","Data":"f0ca2e7b3ac945a8838678f89fec58d115b4447f42315ce05d64579930fba011"} Nov 21 19:18:12 crc kubenswrapper[4701]: I1121 19:18:12.537245 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-76446f9f99-vgs97"] Nov 21 19:18:12 crc kubenswrapper[4701]: I1121 19:18:12.550536 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-76446f9f99-vgs97"] Nov 21 19:18:12 crc kubenswrapper[4701]: I1121 19:18:12.931963 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-76446f9f99-vgs97" podUID="5a12e1a0-73d2-4cfe-96d2-d311378a6e3a" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.120:5353: i/o timeout" Nov 21 19:18:13 crc kubenswrapper[4701]: I1121 19:18:13.113367 4701 scope.go:117] "RemoveContainer" containerID="13c69af73622b2f5c78c9510af85204af7e96e80218d27b521c54177190ea743" Nov 21 19:18:13 crc kubenswrapper[4701]: I1121 19:18:13.498406 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c89bf6b7c-2rjgn" event={"ID":"a044ec4b-a230-4337-a491-b48d8e6a03ec","Type":"ContainerDied","Data":"fd950899157286a9adabf9e5cd07d618b9b829b40bf97bc89ff2ded6587c4e76"} Nov 21 19:18:13 crc kubenswrapper[4701]: I1121 19:18:13.498307 4701 generic.go:334] "Generic (PLEG): container finished" podID="a044ec4b-a230-4337-a491-b48d8e6a03ec" containerID="fd950899157286a9adabf9e5cd07d618b9b829b40bf97bc89ff2ded6587c4e76" exitCode=0 Nov 21 19:18:13 crc kubenswrapper[4701]: I1121 19:18:13.502781 4701 generic.go:334] "Generic (PLEG): container finished" podID="7b56d49b-38ad-4fea-9a4b-b6400d7edce9" containerID="56c7a689e48903eb02263d6981ae3f8bbfabcec4e317bf11c7228f43f30fb6a7" exitCode=0 Nov 21 19:18:13 crc kubenswrapper[4701]: I1121 19:18:13.502885 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59d9b597-bpqmn" event={"ID":"7b56d49b-38ad-4fea-9a4b-b6400d7edce9","Type":"ContainerDied","Data":"56c7a689e48903eb02263d6981ae3f8bbfabcec4e317bf11c7228f43f30fb6a7"} Nov 21 19:18:13 crc kubenswrapper[4701]: I1121 19:18:13.504825 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Nov 21 19:18:13 crc kubenswrapper[4701]: I1121 19:18:13.505149 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Nov 21 19:18:13 crc kubenswrapper[4701]: I1121 19:18:13.510269 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-qnhkr" event={"ID":"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a","Type":"ContainerStarted","Data":"c26dcda8f160617a618ee28510152ac01f79b05d345645c0e1613a780c2ae453"} Nov 21 
19:18:13 crc kubenswrapper[4701]: I1121 19:18:13.567684 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-qnhkr" podStartSLOduration=2.792672628 podStartE2EDuration="11.567651591s" podCreationTimestamp="2025-11-21 19:18:02 +0000 UTC" firstStartedPulling="2025-11-21 19:18:03.447514013 +0000 UTC m=+974.232654040" lastFinishedPulling="2025-11-21 19:18:12.222492976 +0000 UTC m=+983.007633003" observedRunningTime="2025-11-21 19:18:13.560287185 +0000 UTC m=+984.345427222" watchObservedRunningTime="2025-11-21 19:18:13.567651591 +0000 UTC m=+984.352791648" Nov 21 19:18:13 crc kubenswrapper[4701]: I1121 19:18:13.965912 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a12e1a0-73d2-4cfe-96d2-d311378a6e3a" path="/var/lib/kubelet/pods/5a12e1a0-73d2-4cfe-96d2-d311378a6e3a/volumes" Nov 21 19:18:14 crc kubenswrapper[4701]: I1121 19:18:14.500100 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/bf8d5d78-fa29-41ff-94e0-6249f7e02e1b-etc-swift\") pod \"swift-storage-0\" (UID: \"bf8d5d78-fa29-41ff-94e0-6249f7e02e1b\") " pod="openstack/swift-storage-0" Nov 21 19:18:14 crc kubenswrapper[4701]: E1121 19:18:14.500804 4701 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 21 19:18:14 crc kubenswrapper[4701]: E1121 19:18:14.500948 4701 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 21 19:18:14 crc kubenswrapper[4701]: E1121 19:18:14.501003 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/bf8d5d78-fa29-41ff-94e0-6249f7e02e1b-etc-swift podName:bf8d5d78-fa29-41ff-94e0-6249f7e02e1b nodeName:}" failed. No retries permitted until 2025-11-21 19:18:30.500986777 +0000 UTC m=+1001.286126804 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/bf8d5d78-fa29-41ff-94e0-6249f7e02e1b-etc-swift") pod "swift-storage-0" (UID: "bf8d5d78-fa29-41ff-94e0-6249f7e02e1b") : configmap "swift-ring-files" not found Nov 21 19:18:14 crc kubenswrapper[4701]: I1121 19:18:14.793028 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Nov 21 19:18:14 crc kubenswrapper[4701]: I1121 19:18:14.793103 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Nov 21 19:18:15 crc kubenswrapper[4701]: I1121 19:18:15.529981 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"4b75b7e0-a14e-4889-9430-7cbb446d48d9","Type":"ContainerStarted","Data":"7e4ca187feee51944fc53543b02b8174de8775bec81297551df1b333421e2849"} Nov 21 19:18:15 crc kubenswrapper[4701]: I1121 19:18:15.530558 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"4b75b7e0-a14e-4889-9430-7cbb446d48d9","Type":"ContainerStarted","Data":"5b7389ef288b66e000821d2ed837daa649b39279ec8150c75fb5022e255b0742"} Nov 21 19:18:15 crc kubenswrapper[4701]: I1121 19:18:15.530591 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Nov 21 19:18:15 crc kubenswrapper[4701]: I1121 19:18:15.534878 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-msx5f" event={"ID":"81c7a23e-51f0-4360-820b-6f4f7b7daa63","Type":"ContainerStarted","Data":"5fdda5625a7b5a7498088a87c7edc8f02ede8e4d09cb1dbd5f5e1986482c243f"} Nov 21 19:18:15 crc kubenswrapper[4701]: I1121 19:18:15.539080 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-49p6k" event={"ID":"d6bce0ec-3045-405e-914b-f466321dc7ea","Type":"ContainerStarted","Data":"ba1e9c5c323453e61516585094e4b850bb09388cc9c97d6bebb8339d9f0fda0e"} Nov 21 19:18:15 crc kubenswrapper[4701]: I1121 19:18:15.539566 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-49p6k" Nov 21 19:18:15 crc kubenswrapper[4701]: I1121 19:18:15.545305 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"600d7142-cf1a-4e30-968d-5b75a572085d","Type":"ContainerStarted","Data":"e12e7f26b209cd40d77e7a2634e9bcdba16d9c8de05d10331b6d2c353484cbab"} Nov 21 19:18:15 crc kubenswrapper[4701]: I1121 19:18:15.545547 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 21 19:18:15 crc kubenswrapper[4701]: I1121 19:18:15.548683 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c89bf6b7c-2rjgn" event={"ID":"a044ec4b-a230-4337-a491-b48d8e6a03ec","Type":"ContainerStarted","Data":"a2eea557e2c18040b31182e68b888a7e9a4d89f84a63eb8b36a0444ad0c2d230"} Nov 21 19:18:15 crc kubenswrapper[4701]: I1121 19:18:15.548997 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7c89bf6b7c-2rjgn" Nov 21 19:18:15 crc kubenswrapper[4701]: I1121 19:18:15.560909 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=8.533136028 podStartE2EDuration="10.560876669s" podCreationTimestamp="2025-11-21 19:18:05 +0000 UTC" firstStartedPulling="2025-11-21 19:18:11.835714206 +0000 UTC m=+982.620854263" lastFinishedPulling="2025-11-21 19:18:13.863454877 +0000 UTC 
m=+984.648594904" observedRunningTime="2025-11-21 19:18:15.557329384 +0000 UTC m=+986.342469421" watchObservedRunningTime="2025-11-21 19:18:15.560876669 +0000 UTC m=+986.346016696" Nov 21 19:18:15 crc kubenswrapper[4701]: I1121 19:18:15.565702 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59d9b597-bpqmn" event={"ID":"7b56d49b-38ad-4fea-9a4b-b6400d7edce9","Type":"ContainerStarted","Data":"c410626c6c1910a555d22df9b01e01c674f4a44ae932b15e05e7345be65bbe8f"} Nov 21 19:18:15 crc kubenswrapper[4701]: I1121 19:18:15.567346 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-59d9b597-bpqmn" Nov 21 19:18:15 crc kubenswrapper[4701]: I1121 19:18:15.588598 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-49p6k" podStartSLOduration=12.775093447 podStartE2EDuration="34.588575721s" podCreationTimestamp="2025-11-21 19:17:41 +0000 UTC" firstStartedPulling="2025-11-21 19:17:50.56704313 +0000 UTC m=+961.352183157" lastFinishedPulling="2025-11-21 19:18:12.380525394 +0000 UTC m=+983.165665431" observedRunningTime="2025-11-21 19:18:15.585662963 +0000 UTC m=+986.370802990" watchObservedRunningTime="2025-11-21 19:18:15.588575721 +0000 UTC m=+986.373715748" Nov 21 19:18:15 crc kubenswrapper[4701]: I1121 19:18:15.616537 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7c89bf6b7c-2rjgn" podStartSLOduration=11.616518618 podStartE2EDuration="11.616518618s" podCreationTimestamp="2025-11-21 19:18:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:18:15.614636528 +0000 UTC m=+986.399776555" watchObservedRunningTime="2025-11-21 19:18:15.616518618 +0000 UTC m=+986.401658645" Nov 21 19:18:15 crc kubenswrapper[4701]: I1121 19:18:15.640856 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-msx5f" podStartSLOduration=11.640834388 podStartE2EDuration="11.640834388s" podCreationTimestamp="2025-11-21 19:18:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:18:15.636749019 +0000 UTC m=+986.421889046" watchObservedRunningTime="2025-11-21 19:18:15.640834388 +0000 UTC m=+986.425974415" Nov 21 19:18:15 crc kubenswrapper[4701]: I1121 19:18:15.683356 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=14.54020647 podStartE2EDuration="38.683324555s" podCreationTimestamp="2025-11-21 19:17:37 +0000 UTC" firstStartedPulling="2025-11-21 19:17:50.56706901 +0000 UTC m=+961.352209047" lastFinishedPulling="2025-11-21 19:18:14.710187105 +0000 UTC m=+985.495327132" observedRunningTime="2025-11-21 19:18:15.671392106 +0000 UTC m=+986.456532133" watchObservedRunningTime="2025-11-21 19:18:15.683324555 +0000 UTC m=+986.468464592" Nov 21 19:18:15 crc kubenswrapper[4701]: I1121 19:18:15.806608 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Nov 21 19:18:15 crc kubenswrapper[4701]: I1121 19:18:15.832256 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-59d9b597-bpqmn" podStartSLOduration=10.832237091 podStartE2EDuration="10.832237091s" podCreationTimestamp="2025-11-21 19:18:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 
+0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:18:15.700919897 +0000 UTC m=+986.486059924" watchObservedRunningTime="2025-11-21 19:18:15.832237091 +0000 UTC m=+986.617377118" Nov 21 19:18:15 crc kubenswrapper[4701]: I1121 19:18:15.949356 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Nov 21 19:18:17 crc kubenswrapper[4701]: I1121 19:18:17.147394 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Nov 21 19:18:17 crc kubenswrapper[4701]: I1121 19:18:17.299441 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Nov 21 19:18:17 crc kubenswrapper[4701]: I1121 19:18:17.544328 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-3a61-account-create-77s5z"] Nov 21 19:18:17 crc kubenswrapper[4701]: E1121 19:18:17.544759 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a12e1a0-73d2-4cfe-96d2-d311378a6e3a" containerName="dnsmasq-dns" Nov 21 19:18:17 crc kubenswrapper[4701]: I1121 19:18:17.544774 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a12e1a0-73d2-4cfe-96d2-d311378a6e3a" containerName="dnsmasq-dns" Nov 21 19:18:17 crc kubenswrapper[4701]: E1121 19:18:17.544787 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6170b01-4aa3-4d90-a317-764721c7e08c" containerName="init" Nov 21 19:18:17 crc kubenswrapper[4701]: I1121 19:18:17.544795 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6170b01-4aa3-4d90-a317-764721c7e08c" containerName="init" Nov 21 19:18:17 crc kubenswrapper[4701]: E1121 19:18:17.544813 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6170b01-4aa3-4d90-a317-764721c7e08c" containerName="dnsmasq-dns" Nov 21 19:18:17 crc kubenswrapper[4701]: I1121 19:18:17.544821 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6170b01-4aa3-4d90-a317-764721c7e08c" containerName="dnsmasq-dns" Nov 21 19:18:17 crc kubenswrapper[4701]: E1121 19:18:17.544835 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a12e1a0-73d2-4cfe-96d2-d311378a6e3a" containerName="init" Nov 21 19:18:17 crc kubenswrapper[4701]: I1121 19:18:17.544842 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a12e1a0-73d2-4cfe-96d2-d311378a6e3a" containerName="init" Nov 21 19:18:17 crc kubenswrapper[4701]: I1121 19:18:17.545167 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6170b01-4aa3-4d90-a317-764721c7e08c" containerName="dnsmasq-dns" Nov 21 19:18:17 crc kubenswrapper[4701]: I1121 19:18:17.545228 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a12e1a0-73d2-4cfe-96d2-d311378a6e3a" containerName="dnsmasq-dns" Nov 21 19:18:17 crc kubenswrapper[4701]: I1121 19:18:17.545987 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-3a61-account-create-77s5z" Nov 21 19:18:17 crc kubenswrapper[4701]: I1121 19:18:17.549460 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-db-secret" Nov 21 19:18:17 crc kubenswrapper[4701]: I1121 19:18:17.555187 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-3a61-account-create-77s5z"] Nov 21 19:18:17 crc kubenswrapper[4701]: I1121 19:18:17.593615 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-db-create-f7jkr"] Nov 21 19:18:17 crc kubenswrapper[4701]: I1121 19:18:17.596778 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-create-f7jkr" Nov 21 19:18:17 crc kubenswrapper[4701]: I1121 19:18:17.603897 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-create-f7jkr"] Nov 21 19:18:17 crc kubenswrapper[4701]: I1121 19:18:17.684992 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9s7nz\" (UniqueName: \"kubernetes.io/projected/e1d4e8dd-f9a5-4fd6-b533-c84cd7087a59-kube-api-access-9s7nz\") pod \"watcher-3a61-account-create-77s5z\" (UID: \"e1d4e8dd-f9a5-4fd6-b533-c84cd7087a59\") " pod="openstack/watcher-3a61-account-create-77s5z" Nov 21 19:18:17 crc kubenswrapper[4701]: I1121 19:18:17.685080 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e1d4e8dd-f9a5-4fd6-b533-c84cd7087a59-operator-scripts\") pod \"watcher-3a61-account-create-77s5z\" (UID: \"e1d4e8dd-f9a5-4fd6-b533-c84cd7087a59\") " pod="openstack/watcher-3a61-account-create-77s5z" Nov 21 19:18:17 crc kubenswrapper[4701]: I1121 19:18:17.787644 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-skmtn\" (UniqueName: \"kubernetes.io/projected/ae19d0b9-054a-479e-8ed5-cff6df83a7ba-kube-api-access-skmtn\") pod \"watcher-db-create-f7jkr\" (UID: \"ae19d0b9-054a-479e-8ed5-cff6df83a7ba\") " pod="openstack/watcher-db-create-f7jkr" Nov 21 19:18:17 crc kubenswrapper[4701]: I1121 19:18:17.787730 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9s7nz\" (UniqueName: \"kubernetes.io/projected/e1d4e8dd-f9a5-4fd6-b533-c84cd7087a59-kube-api-access-9s7nz\") pod \"watcher-3a61-account-create-77s5z\" (UID: \"e1d4e8dd-f9a5-4fd6-b533-c84cd7087a59\") " pod="openstack/watcher-3a61-account-create-77s5z" Nov 21 19:18:17 crc kubenswrapper[4701]: I1121 19:18:17.787777 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae19d0b9-054a-479e-8ed5-cff6df83a7ba-operator-scripts\") pod \"watcher-db-create-f7jkr\" (UID: \"ae19d0b9-054a-479e-8ed5-cff6df83a7ba\") " pod="openstack/watcher-db-create-f7jkr" Nov 21 19:18:17 crc kubenswrapper[4701]: I1121 19:18:17.787861 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e1d4e8dd-f9a5-4fd6-b533-c84cd7087a59-operator-scripts\") pod \"watcher-3a61-account-create-77s5z\" (UID: \"e1d4e8dd-f9a5-4fd6-b533-c84cd7087a59\") " pod="openstack/watcher-3a61-account-create-77s5z" Nov 21 19:18:17 crc kubenswrapper[4701]: I1121 19:18:17.788983 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/e1d4e8dd-f9a5-4fd6-b533-c84cd7087a59-operator-scripts\") pod \"watcher-3a61-account-create-77s5z\" (UID: \"e1d4e8dd-f9a5-4fd6-b533-c84cd7087a59\") " pod="openstack/watcher-3a61-account-create-77s5z" Nov 21 19:18:17 crc kubenswrapper[4701]: I1121 19:18:17.811317 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9s7nz\" (UniqueName: \"kubernetes.io/projected/e1d4e8dd-f9a5-4fd6-b533-c84cd7087a59-kube-api-access-9s7nz\") pod \"watcher-3a61-account-create-77s5z\" (UID: \"e1d4e8dd-f9a5-4fd6-b533-c84cd7087a59\") " pod="openstack/watcher-3a61-account-create-77s5z" Nov 21 19:18:17 crc kubenswrapper[4701]: I1121 19:18:17.882684 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-3a61-account-create-77s5z" Nov 21 19:18:17 crc kubenswrapper[4701]: I1121 19:18:17.891158 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-skmtn\" (UniqueName: \"kubernetes.io/projected/ae19d0b9-054a-479e-8ed5-cff6df83a7ba-kube-api-access-skmtn\") pod \"watcher-db-create-f7jkr\" (UID: \"ae19d0b9-054a-479e-8ed5-cff6df83a7ba\") " pod="openstack/watcher-db-create-f7jkr" Nov 21 19:18:17 crc kubenswrapper[4701]: I1121 19:18:17.891246 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae19d0b9-054a-479e-8ed5-cff6df83a7ba-operator-scripts\") pod \"watcher-db-create-f7jkr\" (UID: \"ae19d0b9-054a-479e-8ed5-cff6df83a7ba\") " pod="openstack/watcher-db-create-f7jkr" Nov 21 19:18:17 crc kubenswrapper[4701]: I1121 19:18:17.892029 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae19d0b9-054a-479e-8ed5-cff6df83a7ba-operator-scripts\") pod \"watcher-db-create-f7jkr\" (UID: \"ae19d0b9-054a-479e-8ed5-cff6df83a7ba\") " pod="openstack/watcher-db-create-f7jkr" Nov 21 19:18:17 crc kubenswrapper[4701]: I1121 19:18:17.912095 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-skmtn\" (UniqueName: \"kubernetes.io/projected/ae19d0b9-054a-479e-8ed5-cff6df83a7ba-kube-api-access-skmtn\") pod \"watcher-db-create-f7jkr\" (UID: \"ae19d0b9-054a-479e-8ed5-cff6df83a7ba\") " pod="openstack/watcher-db-create-f7jkr" Nov 21 19:18:17 crc kubenswrapper[4701]: I1121 19:18:17.912357 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-db-create-f7jkr" Nov 21 19:18:20 crc kubenswrapper[4701]: I1121 19:18:20.020487 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7c89bf6b7c-2rjgn" Nov 21 19:18:20 crc kubenswrapper[4701]: I1121 19:18:20.742055 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-59d9b597-bpqmn" Nov 21 19:18:20 crc kubenswrapper[4701]: I1121 19:18:20.827508 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c89bf6b7c-2rjgn"] Nov 21 19:18:20 crc kubenswrapper[4701]: I1121 19:18:20.827813 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7c89bf6b7c-2rjgn" podUID="a044ec4b-a230-4337-a491-b48d8e6a03ec" containerName="dnsmasq-dns" containerID="cri-o://a2eea557e2c18040b31182e68b888a7e9a4d89f84a63eb8b36a0444ad0c2d230" gracePeriod=10 Nov 21 19:18:21 crc kubenswrapper[4701]: I1121 19:18:21.768700 4701 generic.go:334] "Generic (PLEG): container finished" podID="6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a" containerID="c26dcda8f160617a618ee28510152ac01f79b05d345645c0e1613a780c2ae453" exitCode=0 Nov 21 19:18:21 crc kubenswrapper[4701]: I1121 19:18:21.768795 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-qnhkr" event={"ID":"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a","Type":"ContainerDied","Data":"c26dcda8f160617a618ee28510152ac01f79b05d345645c0e1613a780c2ae453"} Nov 21 19:18:21 crc kubenswrapper[4701]: I1121 19:18:21.773080 4701 generic.go:334] "Generic (PLEG): container finished" podID="a044ec4b-a230-4337-a491-b48d8e6a03ec" containerID="a2eea557e2c18040b31182e68b888a7e9a4d89f84a63eb8b36a0444ad0c2d230" exitCode=0 Nov 21 19:18:21 crc kubenswrapper[4701]: I1121 19:18:21.773155 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c89bf6b7c-2rjgn" event={"ID":"a044ec4b-a230-4337-a491-b48d8e6a03ec","Type":"ContainerDied","Data":"a2eea557e2c18040b31182e68b888a7e9a4d89f84a63eb8b36a0444ad0c2d230"} Nov 21 19:18:24 crc kubenswrapper[4701]: I1121 19:18:24.743773 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-h5fzs"] Nov 21 19:18:24 crc kubenswrapper[4701]: I1121 19:18:24.746375 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-h5fzs" Nov 21 19:18:24 crc kubenswrapper[4701]: I1121 19:18:24.767508 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1708d089-4719-4de2-af63-748de46758d4-operator-scripts\") pod \"keystone-db-create-h5fzs\" (UID: \"1708d089-4719-4de2-af63-748de46758d4\") " pod="openstack/keystone-db-create-h5fzs" Nov 21 19:18:24 crc kubenswrapper[4701]: I1121 19:18:24.769945 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4qggz\" (UniqueName: \"kubernetes.io/projected/1708d089-4719-4de2-af63-748de46758d4-kube-api-access-4qggz\") pod \"keystone-db-create-h5fzs\" (UID: \"1708d089-4719-4de2-af63-748de46758d4\") " pod="openstack/keystone-db-create-h5fzs" Nov 21 19:18:24 crc kubenswrapper[4701]: I1121 19:18:24.768764 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-h5fzs"] Nov 21 19:18:24 crc kubenswrapper[4701]: I1121 19:18:24.853323 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-65e8-account-create-xh7st"] Nov 21 19:18:24 crc kubenswrapper[4701]: I1121 19:18:24.854642 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-65e8-account-create-xh7st" Nov 21 19:18:24 crc kubenswrapper[4701]: I1121 19:18:24.859008 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 21 19:18:24 crc kubenswrapper[4701]: I1121 19:18:24.875468 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1708d089-4719-4de2-af63-748de46758d4-operator-scripts\") pod \"keystone-db-create-h5fzs\" (UID: \"1708d089-4719-4de2-af63-748de46758d4\") " pod="openstack/keystone-db-create-h5fzs" Nov 21 19:18:24 crc kubenswrapper[4701]: I1121 19:18:24.875534 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4qggz\" (UniqueName: \"kubernetes.io/projected/1708d089-4719-4de2-af63-748de46758d4-kube-api-access-4qggz\") pod \"keystone-db-create-h5fzs\" (UID: \"1708d089-4719-4de2-af63-748de46758d4\") " pod="openstack/keystone-db-create-h5fzs" Nov 21 19:18:24 crc kubenswrapper[4701]: I1121 19:18:24.875583 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e5bfc4d-be86-4362-bd2f-9dc613af744a-operator-scripts\") pod \"keystone-65e8-account-create-xh7st\" (UID: \"2e5bfc4d-be86-4362-bd2f-9dc613af744a\") " pod="openstack/keystone-65e8-account-create-xh7st" Nov 21 19:18:24 crc kubenswrapper[4701]: I1121 19:18:24.875641 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sdbdv\" (UniqueName: \"kubernetes.io/projected/2e5bfc4d-be86-4362-bd2f-9dc613af744a-kube-api-access-sdbdv\") pod \"keystone-65e8-account-create-xh7st\" (UID: \"2e5bfc4d-be86-4362-bd2f-9dc613af744a\") " pod="openstack/keystone-65e8-account-create-xh7st" Nov 21 19:18:24 crc kubenswrapper[4701]: I1121 19:18:24.876576 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1708d089-4719-4de2-af63-748de46758d4-operator-scripts\") pod \"keystone-db-create-h5fzs\" (UID: \"1708d089-4719-4de2-af63-748de46758d4\") " 
pod="openstack/keystone-db-create-h5fzs" Nov 21 19:18:24 crc kubenswrapper[4701]: I1121 19:18:24.891251 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-65e8-account-create-xh7st"] Nov 21 19:18:24 crc kubenswrapper[4701]: I1121 19:18:24.911113 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4qggz\" (UniqueName: \"kubernetes.io/projected/1708d089-4719-4de2-af63-748de46758d4-kube-api-access-4qggz\") pod \"keystone-db-create-h5fzs\" (UID: \"1708d089-4719-4de2-af63-748de46758d4\") " pod="openstack/keystone-db-create-h5fzs" Nov 21 19:18:24 crc kubenswrapper[4701]: I1121 19:18:24.976077 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-qnhkr" Nov 21 19:18:24 crc kubenswrapper[4701]: I1121 19:18:24.976821 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sdbdv\" (UniqueName: \"kubernetes.io/projected/2e5bfc4d-be86-4362-bd2f-9dc613af744a-kube-api-access-sdbdv\") pod \"keystone-65e8-account-create-xh7st\" (UID: \"2e5bfc4d-be86-4362-bd2f-9dc613af744a\") " pod="openstack/keystone-65e8-account-create-xh7st" Nov 21 19:18:24 crc kubenswrapper[4701]: I1121 19:18:24.977024 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e5bfc4d-be86-4362-bd2f-9dc613af744a-operator-scripts\") pod \"keystone-65e8-account-create-xh7st\" (UID: \"2e5bfc4d-be86-4362-bd2f-9dc613af744a\") " pod="openstack/keystone-65e8-account-create-xh7st" Nov 21 19:18:24 crc kubenswrapper[4701]: I1121 19:18:24.979294 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e5bfc4d-be86-4362-bd2f-9dc613af744a-operator-scripts\") pod \"keystone-65e8-account-create-xh7st\" (UID: \"2e5bfc4d-be86-4362-bd2f-9dc613af744a\") " pod="openstack/keystone-65e8-account-create-xh7st" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.007015 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sdbdv\" (UniqueName: \"kubernetes.io/projected/2e5bfc4d-be86-4362-bd2f-9dc613af744a-kube-api-access-sdbdv\") pod \"keystone-65e8-account-create-xh7st\" (UID: \"2e5bfc4d-be86-4362-bd2f-9dc613af744a\") " pod="openstack/keystone-65e8-account-create-xh7st" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.021559 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-7c89bf6b7c-2rjgn" podUID="a044ec4b-a230-4337-a491-b48d8e6a03ec" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.123:5353: connect: connection refused" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.081328 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-dispersionconf\") pod \"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a\" (UID: \"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a\") " Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.081417 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zdbss\" (UniqueName: \"kubernetes.io/projected/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-kube-api-access-zdbss\") pod \"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a\" (UID: \"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a\") " Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.081451 4701 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-ring-data-devices\") pod \"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a\" (UID: \"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a\") " Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.081527 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-combined-ca-bundle\") pod \"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a\" (UID: \"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a\") " Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.081669 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-swiftconf\") pod \"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a\" (UID: \"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a\") " Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.081711 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-scripts\") pod \"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a\" (UID: \"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a\") " Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.081819 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-etc-swift\") pod \"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a\" (UID: \"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a\") " Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.084657 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a" (UID: "6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.088985 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a" (UID: "6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.093358 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-kube-api-access-zdbss" (OuterVolumeSpecName: "kube-api-access-zdbss") pod "6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a" (UID: "6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a"). InnerVolumeSpecName "kube-api-access-zdbss". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.094160 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-65e8-account-create-xh7st" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.095589 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a" (UID: "6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a"). 
InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.099000 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-h5fzs" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.099732 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-t695v"] Nov 21 19:18:25 crc kubenswrapper[4701]: E1121 19:18:25.102449 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a" containerName="swift-ring-rebalance" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.102472 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a" containerName="swift-ring-rebalance" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.108350 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a" containerName="swift-ring-rebalance" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.111663 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-t695v" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.111738 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-scripts" (OuterVolumeSpecName: "scripts") pod "6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a" (UID: "6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.152413 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a" (UID: "6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.165220 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-t695v"] Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.188477 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/372e7b57-848c-4342-a448-3b3cb9b80aa4-operator-scripts\") pod \"placement-db-create-t695v\" (UID: \"372e7b57-848c-4342-a448-3b3cb9b80aa4\") " pod="openstack/placement-db-create-t695v" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.188626 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fn9t4\" (UniqueName: \"kubernetes.io/projected/372e7b57-848c-4342-a448-3b3cb9b80aa4-kube-api-access-fn9t4\") pod \"placement-db-create-t695v\" (UID: \"372e7b57-848c-4342-a448-3b3cb9b80aa4\") " pod="openstack/placement-db-create-t695v" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.189102 4701 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.189147 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zdbss\" (UniqueName: \"kubernetes.io/projected/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-kube-api-access-zdbss\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.189159 4701 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.189169 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.189223 4701 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.189235 4701 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.194969 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a" (UID: "6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.216865 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-d97b-account-create-cxhsn"] Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.218481 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-d97b-account-create-cxhsn" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.221755 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.226683 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-d97b-account-create-cxhsn"] Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.295909 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/372e7b57-848c-4342-a448-3b3cb9b80aa4-operator-scripts\") pod \"placement-db-create-t695v\" (UID: \"372e7b57-848c-4342-a448-3b3cb9b80aa4\") " pod="openstack/placement-db-create-t695v" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.295999 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xqgn4\" (UniqueName: \"kubernetes.io/projected/b57c7d3c-d5f8-45d9-bf1b-51ec05afd543-kube-api-access-xqgn4\") pod \"placement-d97b-account-create-cxhsn\" (UID: \"b57c7d3c-d5f8-45d9-bf1b-51ec05afd543\") " pod="openstack/placement-d97b-account-create-cxhsn" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.296049 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fn9t4\" (UniqueName: \"kubernetes.io/projected/372e7b57-848c-4342-a448-3b3cb9b80aa4-kube-api-access-fn9t4\") pod \"placement-db-create-t695v\" (UID: \"372e7b57-848c-4342-a448-3b3cb9b80aa4\") " pod="openstack/placement-db-create-t695v" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.296077 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b57c7d3c-d5f8-45d9-bf1b-51ec05afd543-operator-scripts\") pod \"placement-d97b-account-create-cxhsn\" (UID: \"b57c7d3c-d5f8-45d9-bf1b-51ec05afd543\") " pod="openstack/placement-d97b-account-create-cxhsn" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.296171 4701 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.303979 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/372e7b57-848c-4342-a448-3b3cb9b80aa4-operator-scripts\") pod \"placement-db-create-t695v\" (UID: \"372e7b57-848c-4342-a448-3b3cb9b80aa4\") " pod="openstack/placement-db-create-t695v" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.321520 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fn9t4\" (UniqueName: \"kubernetes.io/projected/372e7b57-848c-4342-a448-3b3cb9b80aa4-kube-api-access-fn9t4\") pod \"placement-db-create-t695v\" (UID: \"372e7b57-848c-4342-a448-3b3cb9b80aa4\") " pod="openstack/placement-db-create-t695v" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.398099 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b57c7d3c-d5f8-45d9-bf1b-51ec05afd543-operator-scripts\") pod \"placement-d97b-account-create-cxhsn\" (UID: \"b57c7d3c-d5f8-45d9-bf1b-51ec05afd543\") " pod="openstack/placement-d97b-account-create-cxhsn" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.398278 
4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xqgn4\" (UniqueName: \"kubernetes.io/projected/b57c7d3c-d5f8-45d9-bf1b-51ec05afd543-kube-api-access-xqgn4\") pod \"placement-d97b-account-create-cxhsn\" (UID: \"b57c7d3c-d5f8-45d9-bf1b-51ec05afd543\") " pod="openstack/placement-d97b-account-create-cxhsn" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.399535 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b57c7d3c-d5f8-45d9-bf1b-51ec05afd543-operator-scripts\") pod \"placement-d97b-account-create-cxhsn\" (UID: \"b57c7d3c-d5f8-45d9-bf1b-51ec05afd543\") " pod="openstack/placement-d97b-account-create-cxhsn" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.429675 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xqgn4\" (UniqueName: \"kubernetes.io/projected/b57c7d3c-d5f8-45d9-bf1b-51ec05afd543-kube-api-access-xqgn4\") pod \"placement-d97b-account-create-cxhsn\" (UID: \"b57c7d3c-d5f8-45d9-bf1b-51ec05afd543\") " pod="openstack/placement-d97b-account-create-cxhsn" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.473258 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-t695v" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.480634 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c89bf6b7c-2rjgn" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.538347 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-d97b-account-create-cxhsn" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.596642 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-create-f7jkr"] Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.602169 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a044ec4b-a230-4337-a491-b48d8e6a03ec-config\") pod \"a044ec4b-a230-4337-a491-b48d8e6a03ec\" (UID: \"a044ec4b-a230-4337-a491-b48d8e6a03ec\") " Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.602244 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a044ec4b-a230-4337-a491-b48d8e6a03ec-dns-svc\") pod \"a044ec4b-a230-4337-a491-b48d8e6a03ec\" (UID: \"a044ec4b-a230-4337-a491-b48d8e6a03ec\") " Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.602509 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a044ec4b-a230-4337-a491-b48d8e6a03ec-ovsdbserver-sb\") pod \"a044ec4b-a230-4337-a491-b48d8e6a03ec\" (UID: \"a044ec4b-a230-4337-a491-b48d8e6a03ec\") " Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.602602 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2mdt9\" (UniqueName: \"kubernetes.io/projected/a044ec4b-a230-4337-a491-b48d8e6a03ec-kube-api-access-2mdt9\") pod \"a044ec4b-a230-4337-a491-b48d8e6a03ec\" (UID: \"a044ec4b-a230-4337-a491-b48d8e6a03ec\") " Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.621916 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a044ec4b-a230-4337-a491-b48d8e6a03ec-kube-api-access-2mdt9" (OuterVolumeSpecName: "kube-api-access-2mdt9") 
pod "a044ec4b-a230-4337-a491-b48d8e6a03ec" (UID: "a044ec4b-a230-4337-a491-b48d8e6a03ec"). InnerVolumeSpecName "kube-api-access-2mdt9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.636548 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-3a61-account-create-77s5z"] Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.670596 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a044ec4b-a230-4337-a491-b48d8e6a03ec-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a044ec4b-a230-4337-a491-b48d8e6a03ec" (UID: "a044ec4b-a230-4337-a491-b48d8e6a03ec"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.688369 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a044ec4b-a230-4337-a491-b48d8e6a03ec-config" (OuterVolumeSpecName: "config") pod "a044ec4b-a230-4337-a491-b48d8e6a03ec" (UID: "a044ec4b-a230-4337-a491-b48d8e6a03ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.690033 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a044ec4b-a230-4337-a491-b48d8e6a03ec-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a044ec4b-a230-4337-a491-b48d8e6a03ec" (UID: "a044ec4b-a230-4337-a491-b48d8e6a03ec"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.706909 4701 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a044ec4b-a230-4337-a491-b48d8e6a03ec-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.708433 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2mdt9\" (UniqueName: \"kubernetes.io/projected/a044ec4b-a230-4337-a491-b48d8e6a03ec-kube-api-access-2mdt9\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.708491 4701 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a044ec4b-a230-4337-a491-b48d8e6a03ec-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.708508 4701 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a044ec4b-a230-4337-a491-b48d8e6a03ec-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.835110 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-65e8-account-create-xh7st"] Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.836487 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-3a61-account-create-77s5z" event={"ID":"e1d4e8dd-f9a5-4fd6-b533-c84cd7087a59","Type":"ContainerStarted","Data":"e45a5c2f47ff602b8564e7902a3d9f4a662b9a94dcdba0ae76885b0be37f3b72"} Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.838746 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-qnhkr" event={"ID":"6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a","Type":"ContainerDied","Data":"f4c852c580319acbb4a5f59bf72920746f39ae0135550a9cbce8701b1d4dff62"} Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 
19:18:25.838782 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f4c852c580319acbb4a5f59bf72920746f39ae0135550a9cbce8701b1d4dff62" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.838856 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-qnhkr" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.842711 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c89bf6b7c-2rjgn" event={"ID":"a044ec4b-a230-4337-a491-b48d8e6a03ec","Type":"ContainerDied","Data":"c1d535f93bcb32d973bf46596cf37f2118ce06b1686236b6fc6ea53a45aacb1f"} Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.842761 4701 scope.go:117] "RemoveContainer" containerID="a2eea557e2c18040b31182e68b888a7e9a4d89f84a63eb8b36a0444ad0c2d230" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.842977 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c89bf6b7c-2rjgn" Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.858773 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-create-f7jkr" event={"ID":"ae19d0b9-054a-479e-8ed5-cff6df83a7ba","Type":"ContainerStarted","Data":"e1e0892ce7db405beef695d1fd0a5fd20b48a2e2152ed795ed2cec0f90bda032"} Nov 21 19:18:25 crc kubenswrapper[4701]: I1121 19:18:25.867858 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"79fa8bdc-6516-4a53-8e96-17a297ac82b7","Type":"ContainerStarted","Data":"c187480c727f299bcd2020511bfa84e39c898b0ca7849ceb4a8712f49642daf5"} Nov 21 19:18:25 crc kubenswrapper[4701]: W1121 19:18:25.877620 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2e5bfc4d_be86_4362_bd2f_9dc613af744a.slice/crio-accc487a1630533091e5486f7af6e5315e845ba69a7018a6acb6b28fd9494962 WatchSource:0}: Error finding container accc487a1630533091e5486f7af6e5315e845ba69a7018a6acb6b28fd9494962: Status 404 returned error can't find the container with id accc487a1630533091e5486f7af6e5315e845ba69a7018a6acb6b28fd9494962 Nov 21 19:18:26 crc kubenswrapper[4701]: I1121 19:18:25.998893 4701 scope.go:117] "RemoveContainer" containerID="fd950899157286a9adabf9e5cd07d618b9b829b40bf97bc89ff2ded6587c4e76" Nov 21 19:18:26 crc kubenswrapper[4701]: I1121 19:18:26.062300 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-h5fzs"] Nov 21 19:18:26 crc kubenswrapper[4701]: I1121 19:18:26.116842 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 21 19:18:26 crc kubenswrapper[4701]: I1121 19:18:26.214896 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c89bf6b7c-2rjgn"] Nov 21 19:18:26 crc kubenswrapper[4701]: I1121 19:18:26.234555 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7c89bf6b7c-2rjgn"] Nov 21 19:18:26 crc kubenswrapper[4701]: I1121 19:18:26.248933 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-t695v"] Nov 21 19:18:26 crc kubenswrapper[4701]: I1121 19:18:26.264788 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-d97b-account-create-cxhsn"] Nov 21 19:18:26 crc kubenswrapper[4701]: I1121 19:18:26.911175 4701 generic.go:334] "Generic (PLEG): container finished" podID="e1d4e8dd-f9a5-4fd6-b533-c84cd7087a59" 
containerID="c0841604da0076c938df5bf002791e90742140ae154f52d377179a475738593c" exitCode=0 Nov 21 19:18:26 crc kubenswrapper[4701]: I1121 19:18:26.911323 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-3a61-account-create-77s5z" event={"ID":"e1d4e8dd-f9a5-4fd6-b533-c84cd7087a59","Type":"ContainerDied","Data":"c0841604da0076c938df5bf002791e90742140ae154f52d377179a475738593c"} Nov 21 19:18:26 crc kubenswrapper[4701]: I1121 19:18:26.916348 4701 generic.go:334] "Generic (PLEG): container finished" podID="2e5bfc4d-be86-4362-bd2f-9dc613af744a" containerID="cd64dd68f08fcd5336538fe9a99b3a0f95bacca4cddc3137671d5d9a9d61a2aa" exitCode=0 Nov 21 19:18:26 crc kubenswrapper[4701]: I1121 19:18:26.916430 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-65e8-account-create-xh7st" event={"ID":"2e5bfc4d-be86-4362-bd2f-9dc613af744a","Type":"ContainerDied","Data":"cd64dd68f08fcd5336538fe9a99b3a0f95bacca4cddc3137671d5d9a9d61a2aa"} Nov 21 19:18:26 crc kubenswrapper[4701]: I1121 19:18:26.916462 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-65e8-account-create-xh7st" event={"ID":"2e5bfc4d-be86-4362-bd2f-9dc613af744a","Type":"ContainerStarted","Data":"accc487a1630533091e5486f7af6e5315e845ba69a7018a6acb6b28fd9494962"} Nov 21 19:18:26 crc kubenswrapper[4701]: I1121 19:18:26.919064 4701 generic.go:334] "Generic (PLEG): container finished" podID="1708d089-4719-4de2-af63-748de46758d4" containerID="a85c35eaf0a095de96a20a175d4c0d830fcdc297a080ea405fd21e32c603c3a7" exitCode=0 Nov 21 19:18:26 crc kubenswrapper[4701]: I1121 19:18:26.919237 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-h5fzs" event={"ID":"1708d089-4719-4de2-af63-748de46758d4","Type":"ContainerDied","Data":"a85c35eaf0a095de96a20a175d4c0d830fcdc297a080ea405fd21e32c603c3a7"} Nov 21 19:18:26 crc kubenswrapper[4701]: I1121 19:18:26.919273 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-h5fzs" event={"ID":"1708d089-4719-4de2-af63-748de46758d4","Type":"ContainerStarted","Data":"1afbc9b73d0c5f112e07517cdd32b40c723e86a284ae0465bf0bf41231998223"} Nov 21 19:18:26 crc kubenswrapper[4701]: I1121 19:18:26.921711 4701 generic.go:334] "Generic (PLEG): container finished" podID="372e7b57-848c-4342-a448-3b3cb9b80aa4" containerID="ee5d884df78d68a09f41cea83ca3d19a11d33ccf31d3a483e0660757388d02a5" exitCode=0 Nov 21 19:18:26 crc kubenswrapper[4701]: I1121 19:18:26.921762 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-t695v" event={"ID":"372e7b57-848c-4342-a448-3b3cb9b80aa4","Type":"ContainerDied","Data":"ee5d884df78d68a09f41cea83ca3d19a11d33ccf31d3a483e0660757388d02a5"} Nov 21 19:18:26 crc kubenswrapper[4701]: I1121 19:18:26.921780 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-t695v" event={"ID":"372e7b57-848c-4342-a448-3b3cb9b80aa4","Type":"ContainerStarted","Data":"3a2dc58aa8c6bbf0e95cc97a783d7d02fff5aae17b86b56c1201cca35017b6ec"} Nov 21 19:18:26 crc kubenswrapper[4701]: I1121 19:18:26.924375 4701 generic.go:334] "Generic (PLEG): container finished" podID="ae19d0b9-054a-479e-8ed5-cff6df83a7ba" containerID="144ab1c00e70af0f3ce24cfd4ca81d940aeab24cd8147543c02a5afc7cc22f70" exitCode=0 Nov 21 19:18:26 crc kubenswrapper[4701]: I1121 19:18:26.924436 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-create-f7jkr" 
event={"ID":"ae19d0b9-054a-479e-8ed5-cff6df83a7ba","Type":"ContainerDied","Data":"144ab1c00e70af0f3ce24cfd4ca81d940aeab24cd8147543c02a5afc7cc22f70"} Nov 21 19:18:26 crc kubenswrapper[4701]: I1121 19:18:26.927506 4701 generic.go:334] "Generic (PLEG): container finished" podID="b57c7d3c-d5f8-45d9-bf1b-51ec05afd543" containerID="0e9b77e4afcfdfcec70788530b66cce6fe61e13c29b7b8dfec8da335ca28e5fa" exitCode=0 Nov 21 19:18:26 crc kubenswrapper[4701]: I1121 19:18:26.927559 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-d97b-account-create-cxhsn" event={"ID":"b57c7d3c-d5f8-45d9-bf1b-51ec05afd543","Type":"ContainerDied","Data":"0e9b77e4afcfdfcec70788530b66cce6fe61e13c29b7b8dfec8da335ca28e5fa"} Nov 21 19:18:26 crc kubenswrapper[4701]: I1121 19:18:26.927575 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-d97b-account-create-cxhsn" event={"ID":"b57c7d3c-d5f8-45d9-bf1b-51ec05afd543","Type":"ContainerStarted","Data":"1992996a37781b54261b0079fea6b34d95716efc28cbeec7ce138651fd26d773"} Nov 21 19:18:26 crc kubenswrapper[4701]: E1121 19:18:26.993124 4701 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod372e7b57_848c_4342_a448_3b3cb9b80aa4.slice/crio-ee5d884df78d68a09f41cea83ca3d19a11d33ccf31d3a483e0660757388d02a5.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb57c7d3c_d5f8_45d9_bf1b_51ec05afd543.slice/crio-0e9b77e4afcfdfcec70788530b66cce6fe61e13c29b7b8dfec8da335ca28e5fa.scope\": RecentStats: unable to find data in memory cache]" Nov 21 19:18:27 crc kubenswrapper[4701]: I1121 19:18:27.373627 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 21 19:18:27 crc kubenswrapper[4701]: I1121 19:18:27.964484 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a044ec4b-a230-4337-a491-b48d8e6a03ec" path="/var/lib/kubelet/pods/a044ec4b-a230-4337-a491-b48d8e6a03ec/volumes" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.322376 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-h5fzs" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.395112 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4qggz\" (UniqueName: \"kubernetes.io/projected/1708d089-4719-4de2-af63-748de46758d4-kube-api-access-4qggz\") pod \"1708d089-4719-4de2-af63-748de46758d4\" (UID: \"1708d089-4719-4de2-af63-748de46758d4\") " Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.395447 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1708d089-4719-4de2-af63-748de46758d4-operator-scripts\") pod \"1708d089-4719-4de2-af63-748de46758d4\" (UID: \"1708d089-4719-4de2-af63-748de46758d4\") " Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.396754 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1708d089-4719-4de2-af63-748de46758d4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1708d089-4719-4de2-af63-748de46758d4" (UID: "1708d089-4719-4de2-af63-748de46758d4"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.411630 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1708d089-4719-4de2-af63-748de46758d4-kube-api-access-4qggz" (OuterVolumeSpecName: "kube-api-access-4qggz") pod "1708d089-4719-4de2-af63-748de46758d4" (UID: "1708d089-4719-4de2-af63-748de46758d4"). InnerVolumeSpecName "kube-api-access-4qggz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.498684 4701 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1708d089-4719-4de2-af63-748de46758d4-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.498726 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4qggz\" (UniqueName: \"kubernetes.io/projected/1708d089-4719-4de2-af63-748de46758d4-kube-api-access-4qggz\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.533583 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-create-f7jkr" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.542903 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-3a61-account-create-77s5z" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.569082 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-d97b-account-create-cxhsn" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.578420 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-t695v" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.579724 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-65e8-account-create-xh7st" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.600015 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-skmtn\" (UniqueName: \"kubernetes.io/projected/ae19d0b9-054a-479e-8ed5-cff6df83a7ba-kube-api-access-skmtn\") pod \"ae19d0b9-054a-479e-8ed5-cff6df83a7ba\" (UID: \"ae19d0b9-054a-479e-8ed5-cff6df83a7ba\") " Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.600230 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae19d0b9-054a-479e-8ed5-cff6df83a7ba-operator-scripts\") pod \"ae19d0b9-054a-479e-8ed5-cff6df83a7ba\" (UID: \"ae19d0b9-054a-479e-8ed5-cff6df83a7ba\") " Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.600303 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9s7nz\" (UniqueName: \"kubernetes.io/projected/e1d4e8dd-f9a5-4fd6-b533-c84cd7087a59-kube-api-access-9s7nz\") pod \"e1d4e8dd-f9a5-4fd6-b533-c84cd7087a59\" (UID: \"e1d4e8dd-f9a5-4fd6-b533-c84cd7087a59\") " Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.600369 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e1d4e8dd-f9a5-4fd6-b533-c84cd7087a59-operator-scripts\") pod \"e1d4e8dd-f9a5-4fd6-b533-c84cd7087a59\" (UID: \"e1d4e8dd-f9a5-4fd6-b533-c84cd7087a59\") " Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.600442 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b57c7d3c-d5f8-45d9-bf1b-51ec05afd543-operator-scripts\") pod \"b57c7d3c-d5f8-45d9-bf1b-51ec05afd543\" (UID: \"b57c7d3c-d5f8-45d9-bf1b-51ec05afd543\") " Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.600596 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xqgn4\" (UniqueName: \"kubernetes.io/projected/b57c7d3c-d5f8-45d9-bf1b-51ec05afd543-kube-api-access-xqgn4\") pod \"b57c7d3c-d5f8-45d9-bf1b-51ec05afd543\" (UID: \"b57c7d3c-d5f8-45d9-bf1b-51ec05afd543\") " Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.602796 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ae19d0b9-054a-479e-8ed5-cff6df83a7ba-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ae19d0b9-054a-479e-8ed5-cff6df83a7ba" (UID: "ae19d0b9-054a-479e-8ed5-cff6df83a7ba"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.603075 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b57c7d3c-d5f8-45d9-bf1b-51ec05afd543-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b57c7d3c-d5f8-45d9-bf1b-51ec05afd543" (UID: "b57c7d3c-d5f8-45d9-bf1b-51ec05afd543"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.603089 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e1d4e8dd-f9a5-4fd6-b533-c84cd7087a59-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e1d4e8dd-f9a5-4fd6-b533-c84cd7087a59" (UID: "e1d4e8dd-f9a5-4fd6-b533-c84cd7087a59"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.611386 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e1d4e8dd-f9a5-4fd6-b533-c84cd7087a59-kube-api-access-9s7nz" (OuterVolumeSpecName: "kube-api-access-9s7nz") pod "e1d4e8dd-f9a5-4fd6-b533-c84cd7087a59" (UID: "e1d4e8dd-f9a5-4fd6-b533-c84cd7087a59"). InnerVolumeSpecName "kube-api-access-9s7nz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.611470 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b57c7d3c-d5f8-45d9-bf1b-51ec05afd543-kube-api-access-xqgn4" (OuterVolumeSpecName: "kube-api-access-xqgn4") pod "b57c7d3c-d5f8-45d9-bf1b-51ec05afd543" (UID: "b57c7d3c-d5f8-45d9-bf1b-51ec05afd543"). InnerVolumeSpecName "kube-api-access-xqgn4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.611827 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae19d0b9-054a-479e-8ed5-cff6df83a7ba-kube-api-access-skmtn" (OuterVolumeSpecName: "kube-api-access-skmtn") pod "ae19d0b9-054a-479e-8ed5-cff6df83a7ba" (UID: "ae19d0b9-054a-479e-8ed5-cff6df83a7ba"). InnerVolumeSpecName "kube-api-access-skmtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.707734 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/372e7b57-848c-4342-a448-3b3cb9b80aa4-operator-scripts\") pod \"372e7b57-848c-4342-a448-3b3cb9b80aa4\" (UID: \"372e7b57-848c-4342-a448-3b3cb9b80aa4\") " Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.708140 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e5bfc4d-be86-4362-bd2f-9dc613af744a-operator-scripts\") pod \"2e5bfc4d-be86-4362-bd2f-9dc613af744a\" (UID: \"2e5bfc4d-be86-4362-bd2f-9dc613af744a\") " Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.708348 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fn9t4\" (UniqueName: \"kubernetes.io/projected/372e7b57-848c-4342-a448-3b3cb9b80aa4-kube-api-access-fn9t4\") pod \"372e7b57-848c-4342-a448-3b3cb9b80aa4\" (UID: \"372e7b57-848c-4342-a448-3b3cb9b80aa4\") " Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.708403 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sdbdv\" (UniqueName: \"kubernetes.io/projected/2e5bfc4d-be86-4362-bd2f-9dc613af744a-kube-api-access-sdbdv\") pod \"2e5bfc4d-be86-4362-bd2f-9dc613af744a\" (UID: \"2e5bfc4d-be86-4362-bd2f-9dc613af744a\") " Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.708560 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/372e7b57-848c-4342-a448-3b3cb9b80aa4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "372e7b57-848c-4342-a448-3b3cb9b80aa4" (UID: "372e7b57-848c-4342-a448-3b3cb9b80aa4"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.709035 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e5bfc4d-be86-4362-bd2f-9dc613af744a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2e5bfc4d-be86-4362-bd2f-9dc613af744a" (UID: "2e5bfc4d-be86-4362-bd2f-9dc613af744a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.709456 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-skmtn\" (UniqueName: \"kubernetes.io/projected/ae19d0b9-054a-479e-8ed5-cff6df83a7ba-kube-api-access-skmtn\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.709507 4701 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e5bfc4d-be86-4362-bd2f-9dc613af744a-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.709529 4701 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae19d0b9-054a-479e-8ed5-cff6df83a7ba-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.709549 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9s7nz\" (UniqueName: \"kubernetes.io/projected/e1d4e8dd-f9a5-4fd6-b533-c84cd7087a59-kube-api-access-9s7nz\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.709569 4701 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e1d4e8dd-f9a5-4fd6-b533-c84cd7087a59-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.709589 4701 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b57c7d3c-d5f8-45d9-bf1b-51ec05afd543-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.709607 4701 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/372e7b57-848c-4342-a448-3b3cb9b80aa4-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.709627 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xqgn4\" (UniqueName: \"kubernetes.io/projected/b57c7d3c-d5f8-45d9-bf1b-51ec05afd543-kube-api-access-xqgn4\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.712528 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/372e7b57-848c-4342-a448-3b3cb9b80aa4-kube-api-access-fn9t4" (OuterVolumeSpecName: "kube-api-access-fn9t4") pod "372e7b57-848c-4342-a448-3b3cb9b80aa4" (UID: "372e7b57-848c-4342-a448-3b3cb9b80aa4"). InnerVolumeSpecName "kube-api-access-fn9t4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.712899 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e5bfc4d-be86-4362-bd2f-9dc613af744a-kube-api-access-sdbdv" (OuterVolumeSpecName: "kube-api-access-sdbdv") pod "2e5bfc4d-be86-4362-bd2f-9dc613af744a" (UID: "2e5bfc4d-be86-4362-bd2f-9dc613af744a"). 
InnerVolumeSpecName "kube-api-access-sdbdv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.811419 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fn9t4\" (UniqueName: \"kubernetes.io/projected/372e7b57-848c-4342-a448-3b3cb9b80aa4-kube-api-access-fn9t4\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.811786 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sdbdv\" (UniqueName: \"kubernetes.io/projected/2e5bfc4d-be86-4362-bd2f-9dc613af744a-kube-api-access-sdbdv\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.960019 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-65e8-account-create-xh7st" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.960025 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-65e8-account-create-xh7st" event={"ID":"2e5bfc4d-be86-4362-bd2f-9dc613af744a","Type":"ContainerDied","Data":"accc487a1630533091e5486f7af6e5315e845ba69a7018a6acb6b28fd9494962"} Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.960168 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="accc487a1630533091e5486f7af6e5315e845ba69a7018a6acb6b28fd9494962" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.962925 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-h5fzs" event={"ID":"1708d089-4719-4de2-af63-748de46758d4","Type":"ContainerDied","Data":"1afbc9b73d0c5f112e07517cdd32b40c723e86a284ae0465bf0bf41231998223"} Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.963784 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1afbc9b73d0c5f112e07517cdd32b40c723e86a284ae0465bf0bf41231998223" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.962978 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-h5fzs" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.966256 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-t695v" event={"ID":"372e7b57-848c-4342-a448-3b3cb9b80aa4","Type":"ContainerDied","Data":"3a2dc58aa8c6bbf0e95cc97a783d7d02fff5aae17b86b56c1201cca35017b6ec"} Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.966309 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3a2dc58aa8c6bbf0e95cc97a783d7d02fff5aae17b86b56c1201cca35017b6ec" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.966311 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-t695v" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.969056 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-db-create-f7jkr" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.969057 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-create-f7jkr" event={"ID":"ae19d0b9-054a-479e-8ed5-cff6df83a7ba","Type":"ContainerDied","Data":"e1e0892ce7db405beef695d1fd0a5fd20b48a2e2152ed795ed2cec0f90bda032"} Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.969247 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e1e0892ce7db405beef695d1fd0a5fd20b48a2e2152ed795ed2cec0f90bda032" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.972299 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-d97b-account-create-cxhsn" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.972314 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-d97b-account-create-cxhsn" event={"ID":"b57c7d3c-d5f8-45d9-bf1b-51ec05afd543","Type":"ContainerDied","Data":"1992996a37781b54261b0079fea6b34d95716efc28cbeec7ce138651fd26d773"} Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.972368 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1992996a37781b54261b0079fea6b34d95716efc28cbeec7ce138651fd26d773" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.976433 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"79fa8bdc-6516-4a53-8e96-17a297ac82b7","Type":"ContainerStarted","Data":"1ffd6b68fea8630375351ce4991f18288751c9de3f864f163aa877b421ce3654"} Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.978887 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-3a61-account-create-77s5z" event={"ID":"e1d4e8dd-f9a5-4fd6-b533-c84cd7087a59","Type":"ContainerDied","Data":"e45a5c2f47ff602b8564e7902a3d9f4a662b9a94dcdba0ae76885b0be37f3b72"} Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.978943 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e45a5c2f47ff602b8564e7902a3d9f4a662b9a94dcdba0ae76885b0be37f3b72" Nov 21 19:18:28 crc kubenswrapper[4701]: I1121 19:18:28.978967 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-3a61-account-create-77s5z" Nov 21 19:18:30 crc kubenswrapper[4701]: I1121 19:18:30.554006 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/bf8d5d78-fa29-41ff-94e0-6249f7e02e1b-etc-swift\") pod \"swift-storage-0\" (UID: \"bf8d5d78-fa29-41ff-94e0-6249f7e02e1b\") " pod="openstack/swift-storage-0" Nov 21 19:18:30 crc kubenswrapper[4701]: I1121 19:18:30.565512 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/bf8d5d78-fa29-41ff-94e0-6249f7e02e1b-etc-swift\") pod \"swift-storage-0\" (UID: \"bf8d5d78-fa29-41ff-94e0-6249f7e02e1b\") " pod="openstack/swift-storage-0" Nov 21 19:18:30 crc kubenswrapper[4701]: I1121 19:18:30.826931 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.033473 4701 generic.go:334] "Generic (PLEG): container finished" podID="34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3" containerID="e4570b453a88dbab2e9d7e0002bd8c806baddbf864333070457a2d3f8f6d6688" exitCode=0 Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.033586 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3","Type":"ContainerDied","Data":"e4570b453a88dbab2e9d7e0002bd8c806baddbf864333070457a2d3f8f6d6688"} Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.041359 4701 generic.go:334] "Generic (PLEG): container finished" podID="fa567817-ce17-4cb3-9e55-e14902a96420" containerID="c9a4925d1edeed0563739e550804f57f15ef77a43f535f1feebf4723ac7ba8c6" exitCode=0 Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.041431 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-notifications-server-0" event={"ID":"fa567817-ce17-4cb3-9e55-e14902a96420","Type":"ContainerDied","Data":"c9a4925d1edeed0563739e550804f57f15ef77a43f535f1feebf4723ac7ba8c6"} Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.096311 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-vqwr8" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.106626 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-vqwr8" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.374285 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-49p6k-config-9vjhv"] Nov 21 19:18:32 crc kubenswrapper[4701]: E1121 19:18:32.375245 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b57c7d3c-d5f8-45d9-bf1b-51ec05afd543" containerName="mariadb-account-create" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.375260 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="b57c7d3c-d5f8-45d9-bf1b-51ec05afd543" containerName="mariadb-account-create" Nov 21 19:18:32 crc kubenswrapper[4701]: E1121 19:18:32.375273 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a044ec4b-a230-4337-a491-b48d8e6a03ec" containerName="dnsmasq-dns" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.375279 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="a044ec4b-a230-4337-a491-b48d8e6a03ec" containerName="dnsmasq-dns" Nov 21 19:18:32 crc kubenswrapper[4701]: E1121 19:18:32.375291 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a044ec4b-a230-4337-a491-b48d8e6a03ec" containerName="init" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.375298 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="a044ec4b-a230-4337-a491-b48d8e6a03ec" containerName="init" Nov 21 19:18:32 crc kubenswrapper[4701]: E1121 19:18:32.375305 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1708d089-4719-4de2-af63-748de46758d4" containerName="mariadb-database-create" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.375311 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="1708d089-4719-4de2-af63-748de46758d4" containerName="mariadb-database-create" Nov 21 19:18:32 crc kubenswrapper[4701]: E1121 19:18:32.375329 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e5bfc4d-be86-4362-bd2f-9dc613af744a" containerName="mariadb-account-create" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 
19:18:32.375338 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e5bfc4d-be86-4362-bd2f-9dc613af744a" containerName="mariadb-account-create" Nov 21 19:18:32 crc kubenswrapper[4701]: E1121 19:18:32.375349 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae19d0b9-054a-479e-8ed5-cff6df83a7ba" containerName="mariadb-database-create" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.375355 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae19d0b9-054a-479e-8ed5-cff6df83a7ba" containerName="mariadb-database-create" Nov 21 19:18:32 crc kubenswrapper[4701]: E1121 19:18:32.375367 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="372e7b57-848c-4342-a448-3b3cb9b80aa4" containerName="mariadb-database-create" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.375373 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="372e7b57-848c-4342-a448-3b3cb9b80aa4" containerName="mariadb-database-create" Nov 21 19:18:32 crc kubenswrapper[4701]: E1121 19:18:32.375395 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1d4e8dd-f9a5-4fd6-b533-c84cd7087a59" containerName="mariadb-account-create" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.375402 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="e1d4e8dd-f9a5-4fd6-b533-c84cd7087a59" containerName="mariadb-account-create" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.380476 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e5bfc4d-be86-4362-bd2f-9dc613af744a" containerName="mariadb-account-create" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.380530 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae19d0b9-054a-479e-8ed5-cff6df83a7ba" containerName="mariadb-database-create" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.380566 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="372e7b57-848c-4342-a448-3b3cb9b80aa4" containerName="mariadb-database-create" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.380586 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="a044ec4b-a230-4337-a491-b48d8e6a03ec" containerName="dnsmasq-dns" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.380606 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="1708d089-4719-4de2-af63-748de46758d4" containerName="mariadb-database-create" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.380641 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="e1d4e8dd-f9a5-4fd6-b533-c84cd7087a59" containerName="mariadb-account-create" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.380653 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="b57c7d3c-d5f8-45d9-bf1b-51ec05afd543" containerName="mariadb-account-create" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.381775 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-49p6k-config-9vjhv" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.385733 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.386623 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-49p6k-config-9vjhv"] Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.401667 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/328ebecd-aa3a-4ceb-ab87-ce00ad3dffab-var-run\") pod \"ovn-controller-49p6k-config-9vjhv\" (UID: \"328ebecd-aa3a-4ceb-ab87-ce00ad3dffab\") " pod="openstack/ovn-controller-49p6k-config-9vjhv" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.401790 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/328ebecd-aa3a-4ceb-ab87-ce00ad3dffab-var-run-ovn\") pod \"ovn-controller-49p6k-config-9vjhv\" (UID: \"328ebecd-aa3a-4ceb-ab87-ce00ad3dffab\") " pod="openstack/ovn-controller-49p6k-config-9vjhv" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.401819 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/328ebecd-aa3a-4ceb-ab87-ce00ad3dffab-additional-scripts\") pod \"ovn-controller-49p6k-config-9vjhv\" (UID: \"328ebecd-aa3a-4ceb-ab87-ce00ad3dffab\") " pod="openstack/ovn-controller-49p6k-config-9vjhv" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.401864 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qq544\" (UniqueName: \"kubernetes.io/projected/328ebecd-aa3a-4ceb-ab87-ce00ad3dffab-kube-api-access-qq544\") pod \"ovn-controller-49p6k-config-9vjhv\" (UID: \"328ebecd-aa3a-4ceb-ab87-ce00ad3dffab\") " pod="openstack/ovn-controller-49p6k-config-9vjhv" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.401904 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/328ebecd-aa3a-4ceb-ab87-ce00ad3dffab-scripts\") pod \"ovn-controller-49p6k-config-9vjhv\" (UID: \"328ebecd-aa3a-4ceb-ab87-ce00ad3dffab\") " pod="openstack/ovn-controller-49p6k-config-9vjhv" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.402146 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/328ebecd-aa3a-4ceb-ab87-ce00ad3dffab-var-log-ovn\") pod \"ovn-controller-49p6k-config-9vjhv\" (UID: \"328ebecd-aa3a-4ceb-ab87-ce00ad3dffab\") " pod="openstack/ovn-controller-49p6k-config-9vjhv" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.503513 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/328ebecd-aa3a-4ceb-ab87-ce00ad3dffab-var-run-ovn\") pod \"ovn-controller-49p6k-config-9vjhv\" (UID: \"328ebecd-aa3a-4ceb-ab87-ce00ad3dffab\") " pod="openstack/ovn-controller-49p6k-config-9vjhv" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.503568 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: 
\"kubernetes.io/configmap/328ebecd-aa3a-4ceb-ab87-ce00ad3dffab-additional-scripts\") pod \"ovn-controller-49p6k-config-9vjhv\" (UID: \"328ebecd-aa3a-4ceb-ab87-ce00ad3dffab\") " pod="openstack/ovn-controller-49p6k-config-9vjhv" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.503602 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qq544\" (UniqueName: \"kubernetes.io/projected/328ebecd-aa3a-4ceb-ab87-ce00ad3dffab-kube-api-access-qq544\") pod \"ovn-controller-49p6k-config-9vjhv\" (UID: \"328ebecd-aa3a-4ceb-ab87-ce00ad3dffab\") " pod="openstack/ovn-controller-49p6k-config-9vjhv" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.503652 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/328ebecd-aa3a-4ceb-ab87-ce00ad3dffab-scripts\") pod \"ovn-controller-49p6k-config-9vjhv\" (UID: \"328ebecd-aa3a-4ceb-ab87-ce00ad3dffab\") " pod="openstack/ovn-controller-49p6k-config-9vjhv" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.503720 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/328ebecd-aa3a-4ceb-ab87-ce00ad3dffab-var-log-ovn\") pod \"ovn-controller-49p6k-config-9vjhv\" (UID: \"328ebecd-aa3a-4ceb-ab87-ce00ad3dffab\") " pod="openstack/ovn-controller-49p6k-config-9vjhv" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.503754 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/328ebecd-aa3a-4ceb-ab87-ce00ad3dffab-var-run\") pod \"ovn-controller-49p6k-config-9vjhv\" (UID: \"328ebecd-aa3a-4ceb-ab87-ce00ad3dffab\") " pod="openstack/ovn-controller-49p6k-config-9vjhv" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.504332 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/328ebecd-aa3a-4ceb-ab87-ce00ad3dffab-var-run\") pod \"ovn-controller-49p6k-config-9vjhv\" (UID: \"328ebecd-aa3a-4ceb-ab87-ce00ad3dffab\") " pod="openstack/ovn-controller-49p6k-config-9vjhv" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.504331 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/328ebecd-aa3a-4ceb-ab87-ce00ad3dffab-var-run-ovn\") pod \"ovn-controller-49p6k-config-9vjhv\" (UID: \"328ebecd-aa3a-4ceb-ab87-ce00ad3dffab\") " pod="openstack/ovn-controller-49p6k-config-9vjhv" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.504421 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/328ebecd-aa3a-4ceb-ab87-ce00ad3dffab-var-log-ovn\") pod \"ovn-controller-49p6k-config-9vjhv\" (UID: \"328ebecd-aa3a-4ceb-ab87-ce00ad3dffab\") " pod="openstack/ovn-controller-49p6k-config-9vjhv" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.505001 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/328ebecd-aa3a-4ceb-ab87-ce00ad3dffab-additional-scripts\") pod \"ovn-controller-49p6k-config-9vjhv\" (UID: \"328ebecd-aa3a-4ceb-ab87-ce00ad3dffab\") " pod="openstack/ovn-controller-49p6k-config-9vjhv" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.506418 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/328ebecd-aa3a-4ceb-ab87-ce00ad3dffab-scripts\") pod \"ovn-controller-49p6k-config-9vjhv\" (UID: \"328ebecd-aa3a-4ceb-ab87-ce00ad3dffab\") " pod="openstack/ovn-controller-49p6k-config-9vjhv" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.533677 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qq544\" (UniqueName: \"kubernetes.io/projected/328ebecd-aa3a-4ceb-ab87-ce00ad3dffab-kube-api-access-qq544\") pod \"ovn-controller-49p6k-config-9vjhv\" (UID: \"328ebecd-aa3a-4ceb-ab87-ce00ad3dffab\") " pod="openstack/ovn-controller-49p6k-config-9vjhv" Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.703226 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 21 19:18:32 crc kubenswrapper[4701]: W1121 19:18:32.707552 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbf8d5d78_fa29_41ff_94e0_6249f7e02e1b.slice/crio-1d1207fed82ff8b64592181ebcedccf8e696183fd850f663a36a6a578f01ef2b WatchSource:0}: Error finding container 1d1207fed82ff8b64592181ebcedccf8e696183fd850f663a36a6a578f01ef2b: Status 404 returned error can't find the container with id 1d1207fed82ff8b64592181ebcedccf8e696183fd850f663a36a6a578f01ef2b Nov 21 19:18:32 crc kubenswrapper[4701]: I1121 19:18:32.787618 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-49p6k-config-9vjhv" Nov 21 19:18:33 crc kubenswrapper[4701]: I1121 19:18:33.057331 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-notifications-server-0" event={"ID":"fa567817-ce17-4cb3-9e55-e14902a96420","Type":"ContainerStarted","Data":"6871f8ab2a0b9e0f15d9fdbd0998c5c23ec8d2883f83e6f0b0c62564126709ed"} Nov 21 19:18:33 crc kubenswrapper[4701]: I1121 19:18:33.057990 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-notifications-server-0" Nov 21 19:18:33 crc kubenswrapper[4701]: I1121 19:18:33.059711 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"bf8d5d78-fa29-41ff-94e0-6249f7e02e1b","Type":"ContainerStarted","Data":"1d1207fed82ff8b64592181ebcedccf8e696183fd850f663a36a6a578f01ef2b"} Nov 21 19:18:33 crc kubenswrapper[4701]: I1121 19:18:33.060759 4701 generic.go:334] "Generic (PLEG): container finished" podID="0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02" containerID="22e1855bd9bfa3a229c0412c5188f4203d022bce21d3827cf5280bada0841afd" exitCode=0 Nov 21 19:18:33 crc kubenswrapper[4701]: I1121 19:18:33.060809 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02","Type":"ContainerDied","Data":"22e1855bd9bfa3a229c0412c5188f4203d022bce21d3827cf5280bada0841afd"} Nov 21 19:18:33 crc kubenswrapper[4701]: I1121 19:18:33.066788 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"79fa8bdc-6516-4a53-8e96-17a297ac82b7","Type":"ContainerStarted","Data":"01a1067b87715b306813bbc60be375c4084fa0a333ffd481e138fd66293d008d"} Nov 21 19:18:33 crc kubenswrapper[4701]: I1121 19:18:33.069939 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3","Type":"ContainerStarted","Data":"36ec982cd779dffc1e34f70f4637cd70db0da4c97841b60626b44a49cb751f98"} Nov 21 19:18:33 crc kubenswrapper[4701]: I1121 19:18:33.070454 4701 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 21 19:18:33 crc kubenswrapper[4701]: I1121 19:18:33.087497 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-notifications-server-0" podStartSLOduration=55.941849168 podStartE2EDuration="1m3.087471751s" podCreationTimestamp="2025-11-21 19:17:30 +0000 UTC" firstStartedPulling="2025-11-21 19:17:50.469821348 +0000 UTC m=+961.254961375" lastFinishedPulling="2025-11-21 19:17:57.615443931 +0000 UTC m=+968.400583958" observedRunningTime="2025-11-21 19:18:33.085737295 +0000 UTC m=+1003.870877322" watchObservedRunningTime="2025-11-21 19:18:33.087471751 +0000 UTC m=+1003.872611778" Nov 21 19:18:33 crc kubenswrapper[4701]: I1121 19:18:33.113219 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=14.377833735 podStartE2EDuration="56.113172498s" podCreationTimestamp="2025-11-21 19:17:37 +0000 UTC" firstStartedPulling="2025-11-21 19:17:50.493132221 +0000 UTC m=+961.278272248" lastFinishedPulling="2025-11-21 19:18:32.228470984 +0000 UTC m=+1003.013611011" observedRunningTime="2025-11-21 19:18:33.10725364 +0000 UTC m=+1003.892393667" watchObservedRunningTime="2025-11-21 19:18:33.113172498 +0000 UTC m=+1003.898312525" Nov 21 19:18:33 crc kubenswrapper[4701]: I1121 19:18:33.197855 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=57.210475296 podStartE2EDuration="1m4.197830244s" podCreationTimestamp="2025-11-21 19:17:29 +0000 UTC" firstStartedPulling="2025-11-21 19:17:50.473448995 +0000 UTC m=+961.258589022" lastFinishedPulling="2025-11-21 19:17:57.460803943 +0000 UTC m=+968.245943970" observedRunningTime="2025-11-21 19:18:33.191590767 +0000 UTC m=+1003.976730794" watchObservedRunningTime="2025-11-21 19:18:33.197830244 +0000 UTC m=+1003.982970271" Nov 21 19:18:33 crc kubenswrapper[4701]: I1121 19:18:33.309013 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-49p6k-config-9vjhv"] Nov 21 19:18:33 crc kubenswrapper[4701]: W1121 19:18:33.643974 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod328ebecd_aa3a_4ceb_ab87_ce00ad3dffab.slice/crio-a0e937e5e8491ba7dc1f37004388563c626e20fe389ec66fb753311348f574c3 WatchSource:0}: Error finding container a0e937e5e8491ba7dc1f37004388563c626e20fe389ec66fb753311348f574c3: Status 404 returned error can't find the container with id a0e937e5e8491ba7dc1f37004388563c626e20fe389ec66fb753311348f574c3 Nov 21 19:18:34 crc kubenswrapper[4701]: I1121 19:18:34.036642 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:34 crc kubenswrapper[4701]: I1121 19:18:34.082337 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"bf8d5d78-fa29-41ff-94e0-6249f7e02e1b","Type":"ContainerStarted","Data":"baf2d3eb8866b52022926b36e832ad12ef0d311b2d6eeb52faf29877c9c95fb9"} Nov 21 19:18:34 crc kubenswrapper[4701]: I1121 19:18:34.082395 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"bf8d5d78-fa29-41ff-94e0-6249f7e02e1b","Type":"ContainerStarted","Data":"54ff25dbf1f6d71108dd861b6e64a6ef1e62ef94706b6fb057598763c6a7414c"} Nov 21 19:18:34 crc kubenswrapper[4701]: I1121 19:18:34.087845 4701 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02","Type":"ContainerStarted","Data":"ec26fe58e71ed5e823c8c6c8ac6317b45cf155e256eebfa23596701c45b7ed26"} Nov 21 19:18:34 crc kubenswrapper[4701]: I1121 19:18:34.088104 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:18:34 crc kubenswrapper[4701]: I1121 19:18:34.091281 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-49p6k-config-9vjhv" event={"ID":"328ebecd-aa3a-4ceb-ab87-ce00ad3dffab","Type":"ContainerStarted","Data":"354bf0446dc64d7962956bdb2c8cc22f747a2ec9da2ed6d8a1d35605511c6387"} Nov 21 19:18:34 crc kubenswrapper[4701]: I1121 19:18:34.091316 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-49p6k-config-9vjhv" event={"ID":"328ebecd-aa3a-4ceb-ab87-ce00ad3dffab","Type":"ContainerStarted","Data":"a0e937e5e8491ba7dc1f37004388563c626e20fe389ec66fb753311348f574c3"} Nov 21 19:18:34 crc kubenswrapper[4701]: I1121 19:18:34.141801 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=57.023762699 podStartE2EDuration="1m4.141775953s" podCreationTimestamp="2025-11-21 19:17:30 +0000 UTC" firstStartedPulling="2025-11-21 19:17:50.494377965 +0000 UTC m=+961.279517992" lastFinishedPulling="2025-11-21 19:17:57.612391219 +0000 UTC m=+968.397531246" observedRunningTime="2025-11-21 19:18:34.129192787 +0000 UTC m=+1004.914332814" watchObservedRunningTime="2025-11-21 19:18:34.141775953 +0000 UTC m=+1004.926915980" Nov 21 19:18:35 crc kubenswrapper[4701]: I1121 19:18:35.104284 4701 generic.go:334] "Generic (PLEG): container finished" podID="328ebecd-aa3a-4ceb-ab87-ce00ad3dffab" containerID="354bf0446dc64d7962956bdb2c8cc22f747a2ec9da2ed6d8a1d35605511c6387" exitCode=0 Nov 21 19:18:35 crc kubenswrapper[4701]: I1121 19:18:35.105226 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-49p6k-config-9vjhv" event={"ID":"328ebecd-aa3a-4ceb-ab87-ce00ad3dffab","Type":"ContainerDied","Data":"354bf0446dc64d7962956bdb2c8cc22f747a2ec9da2ed6d8a1d35605511c6387"} Nov 21 19:18:35 crc kubenswrapper[4701]: I1121 19:18:35.110328 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"bf8d5d78-fa29-41ff-94e0-6249f7e02e1b","Type":"ContainerStarted","Data":"9acae7839ee28ffa6de9a4f48c5f98dc241f57e149464ae35b85375e32b47a97"} Nov 21 19:18:35 crc kubenswrapper[4701]: I1121 19:18:35.110378 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"bf8d5d78-fa29-41ff-94e0-6249f7e02e1b","Type":"ContainerStarted","Data":"c91a5726780c3f1a7a3218ee5cee37bb7b2e1b7a704ebae79f94bfba984f9d88"} Nov 21 19:18:36 crc kubenswrapper[4701]: I1121 19:18:36.122815 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"bf8d5d78-fa29-41ff-94e0-6249f7e02e1b","Type":"ContainerStarted","Data":"a9af8ee84aee0196baf8a434051dbafcc9adae4b9cc19af4b72dd402acadd0f3"} Nov 21 19:18:36 crc kubenswrapper[4701]: I1121 19:18:36.123967 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"bf8d5d78-fa29-41ff-94e0-6249f7e02e1b","Type":"ContainerStarted","Data":"514ee2da1418bf6a0bd427daec39445eca4289e207b0ad5e52e69c95e5ccd0b8"} Nov 21 19:18:36 crc kubenswrapper[4701]: I1121 19:18:36.123991 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/swift-storage-0" event={"ID":"bf8d5d78-fa29-41ff-94e0-6249f7e02e1b","Type":"ContainerStarted","Data":"0905ae8e061d368f33205d555a22378c366d93dcf3a71e718ff61b0ef0af4478"} Nov 21 19:18:36 crc kubenswrapper[4701]: I1121 19:18:36.512604 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-49p6k-config-9vjhv" Nov 21 19:18:36 crc kubenswrapper[4701]: I1121 19:18:36.593996 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/328ebecd-aa3a-4ceb-ab87-ce00ad3dffab-var-run\") pod \"328ebecd-aa3a-4ceb-ab87-ce00ad3dffab\" (UID: \"328ebecd-aa3a-4ceb-ab87-ce00ad3dffab\") " Nov 21 19:18:36 crc kubenswrapper[4701]: I1121 19:18:36.594048 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qq544\" (UniqueName: \"kubernetes.io/projected/328ebecd-aa3a-4ceb-ab87-ce00ad3dffab-kube-api-access-qq544\") pod \"328ebecd-aa3a-4ceb-ab87-ce00ad3dffab\" (UID: \"328ebecd-aa3a-4ceb-ab87-ce00ad3dffab\") " Nov 21 19:18:36 crc kubenswrapper[4701]: I1121 19:18:36.594134 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/328ebecd-aa3a-4ceb-ab87-ce00ad3dffab-additional-scripts\") pod \"328ebecd-aa3a-4ceb-ab87-ce00ad3dffab\" (UID: \"328ebecd-aa3a-4ceb-ab87-ce00ad3dffab\") " Nov 21 19:18:36 crc kubenswrapper[4701]: I1121 19:18:36.594246 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/328ebecd-aa3a-4ceb-ab87-ce00ad3dffab-scripts\") pod \"328ebecd-aa3a-4ceb-ab87-ce00ad3dffab\" (UID: \"328ebecd-aa3a-4ceb-ab87-ce00ad3dffab\") " Nov 21 19:18:36 crc kubenswrapper[4701]: I1121 19:18:36.594273 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/328ebecd-aa3a-4ceb-ab87-ce00ad3dffab-var-log-ovn\") pod \"328ebecd-aa3a-4ceb-ab87-ce00ad3dffab\" (UID: \"328ebecd-aa3a-4ceb-ab87-ce00ad3dffab\") " Nov 21 19:18:36 crc kubenswrapper[4701]: I1121 19:18:36.594294 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/328ebecd-aa3a-4ceb-ab87-ce00ad3dffab-var-run-ovn\") pod \"328ebecd-aa3a-4ceb-ab87-ce00ad3dffab\" (UID: \"328ebecd-aa3a-4ceb-ab87-ce00ad3dffab\") " Nov 21 19:18:36 crc kubenswrapper[4701]: I1121 19:18:36.594763 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/328ebecd-aa3a-4ceb-ab87-ce00ad3dffab-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "328ebecd-aa3a-4ceb-ab87-ce00ad3dffab" (UID: "328ebecd-aa3a-4ceb-ab87-ce00ad3dffab"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 19:18:36 crc kubenswrapper[4701]: I1121 19:18:36.594802 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/328ebecd-aa3a-4ceb-ab87-ce00ad3dffab-var-run" (OuterVolumeSpecName: "var-run") pod "328ebecd-aa3a-4ceb-ab87-ce00ad3dffab" (UID: "328ebecd-aa3a-4ceb-ab87-ce00ad3dffab"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 19:18:36 crc kubenswrapper[4701]: I1121 19:18:36.595877 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/328ebecd-aa3a-4ceb-ab87-ce00ad3dffab-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "328ebecd-aa3a-4ceb-ab87-ce00ad3dffab" (UID: "328ebecd-aa3a-4ceb-ab87-ce00ad3dffab"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:18:36 crc kubenswrapper[4701]: I1121 19:18:36.596056 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/328ebecd-aa3a-4ceb-ab87-ce00ad3dffab-scripts" (OuterVolumeSpecName: "scripts") pod "328ebecd-aa3a-4ceb-ab87-ce00ad3dffab" (UID: "328ebecd-aa3a-4ceb-ab87-ce00ad3dffab"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:18:36 crc kubenswrapper[4701]: I1121 19:18:36.596165 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/328ebecd-aa3a-4ceb-ab87-ce00ad3dffab-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "328ebecd-aa3a-4ceb-ab87-ce00ad3dffab" (UID: "328ebecd-aa3a-4ceb-ab87-ce00ad3dffab"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 19:18:36 crc kubenswrapper[4701]: I1121 19:18:36.602227 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/328ebecd-aa3a-4ceb-ab87-ce00ad3dffab-kube-api-access-qq544" (OuterVolumeSpecName: "kube-api-access-qq544") pod "328ebecd-aa3a-4ceb-ab87-ce00ad3dffab" (UID: "328ebecd-aa3a-4ceb-ab87-ce00ad3dffab"). InnerVolumeSpecName "kube-api-access-qq544". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:18:36 crc kubenswrapper[4701]: I1121 19:18:36.696702 4701 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/328ebecd-aa3a-4ceb-ab87-ce00ad3dffab-var-run\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:36 crc kubenswrapper[4701]: I1121 19:18:36.696744 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qq544\" (UniqueName: \"kubernetes.io/projected/328ebecd-aa3a-4ceb-ab87-ce00ad3dffab-kube-api-access-qq544\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:36 crc kubenswrapper[4701]: I1121 19:18:36.696758 4701 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/328ebecd-aa3a-4ceb-ab87-ce00ad3dffab-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:36 crc kubenswrapper[4701]: I1121 19:18:36.696768 4701 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/328ebecd-aa3a-4ceb-ab87-ce00ad3dffab-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:36 crc kubenswrapper[4701]: I1121 19:18:36.696779 4701 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/328ebecd-aa3a-4ceb-ab87-ce00ad3dffab-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:36 crc kubenswrapper[4701]: I1121 19:18:36.696789 4701 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/328ebecd-aa3a-4ceb-ab87-ce00ad3dffab-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:37 crc kubenswrapper[4701]: I1121 19:18:37.135406 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/swift-storage-0" event={"ID":"bf8d5d78-fa29-41ff-94e0-6249f7e02e1b","Type":"ContainerStarted","Data":"d64fe8fb8d7e58642f0352db202a9fbcd94e41714186ca0f4d081dcf39d93299"} Nov 21 19:18:37 crc kubenswrapper[4701]: I1121 19:18:37.135781 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"bf8d5d78-fa29-41ff-94e0-6249f7e02e1b","Type":"ContainerStarted","Data":"acb39fb3755b995f995609764c05680530840dbc6c2a1fb291ce4ed7ee22abe0"} Nov 21 19:18:37 crc kubenswrapper[4701]: I1121 19:18:37.156711 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-49p6k-config-9vjhv" event={"ID":"328ebecd-aa3a-4ceb-ab87-ce00ad3dffab","Type":"ContainerDied","Data":"a0e937e5e8491ba7dc1f37004388563c626e20fe389ec66fb753311348f574c3"} Nov 21 19:18:37 crc kubenswrapper[4701]: I1121 19:18:37.156774 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a0e937e5e8491ba7dc1f37004388563c626e20fe389ec66fb753311348f574c3" Nov 21 19:18:37 crc kubenswrapper[4701]: I1121 19:18:37.156791 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-49p6k-config-9vjhv" Nov 21 19:18:37 crc kubenswrapper[4701]: I1121 19:18:37.648241 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-49p6k-config-9vjhv"] Nov 21 19:18:37 crc kubenswrapper[4701]: I1121 19:18:37.655968 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-49p6k-config-9vjhv"] Nov 21 19:18:37 crc kubenswrapper[4701]: I1121 19:18:37.986514 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="328ebecd-aa3a-4ceb-ab87-ce00ad3dffab" path="/var/lib/kubelet/pods/328ebecd-aa3a-4ceb-ab87-ce00ad3dffab/volumes" Nov 21 19:18:38 crc kubenswrapper[4701]: I1121 19:18:38.170368 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"bf8d5d78-fa29-41ff-94e0-6249f7e02e1b","Type":"ContainerStarted","Data":"f389a39f7c5f16565586d60d8f60e8dd3ebd5672e355ea1ac13933ad47e04528"} Nov 21 19:18:38 crc kubenswrapper[4701]: I1121 19:18:38.170422 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"bf8d5d78-fa29-41ff-94e0-6249f7e02e1b","Type":"ContainerStarted","Data":"4bda9d7426c92a6c06f665d151ce44f648ddae318f8fbe6cd726d6c3ebd7977e"} Nov 21 19:18:38 crc kubenswrapper[4701]: I1121 19:18:38.170440 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"bf8d5d78-fa29-41ff-94e0-6249f7e02e1b","Type":"ContainerStarted","Data":"8b33e09f661044881741b8e9ae77f129342bee54af43981dd86beed15675de06"} Nov 21 19:18:38 crc kubenswrapper[4701]: I1121 19:18:38.170448 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"bf8d5d78-fa29-41ff-94e0-6249f7e02e1b","Type":"ContainerStarted","Data":"103a5153fb7270f9c4b4121f1aa5cf8ba2c9ebefc1f93de5893af56dd17cb2bd"} Nov 21 19:18:38 crc kubenswrapper[4701]: I1121 19:18:38.170458 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"bf8d5d78-fa29-41ff-94e0-6249f7e02e1b","Type":"ContainerStarted","Data":"aad644ac8e2982c87a1db87a0bf882837a93779142b6ecbef8ba18d9bf1a23cd"} Nov 21 19:18:39 crc kubenswrapper[4701]: I1121 19:18:39.035866 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:39 crc kubenswrapper[4701]: I1121 
19:18:39.041457 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:39 crc kubenswrapper[4701]: I1121 19:18:39.188643 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"bf8d5d78-fa29-41ff-94e0-6249f7e02e1b","Type":"ContainerStarted","Data":"bced762920c9362ee050374b1133878552b73e2ff55692d4bc4ef51bf6cb80cd"} Nov 21 19:18:39 crc kubenswrapper[4701]: I1121 19:18:39.193604 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:39 crc kubenswrapper[4701]: I1121 19:18:39.242577 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=38.073239008 podStartE2EDuration="42.242548227s" podCreationTimestamp="2025-11-21 19:17:57 +0000 UTC" firstStartedPulling="2025-11-21 19:18:32.710108762 +0000 UTC m=+1003.495248789" lastFinishedPulling="2025-11-21 19:18:36.879417941 +0000 UTC m=+1007.664558008" observedRunningTime="2025-11-21 19:18:39.233115875 +0000 UTC m=+1010.018255912" watchObservedRunningTime="2025-11-21 19:18:39.242548227 +0000 UTC m=+1010.027688294" Nov 21 19:18:39 crc kubenswrapper[4701]: I1121 19:18:39.647836 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-85789dd45c-hpddc"] Nov 21 19:18:39 crc kubenswrapper[4701]: E1121 19:18:39.648465 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="328ebecd-aa3a-4ceb-ab87-ce00ad3dffab" containerName="ovn-config" Nov 21 19:18:39 crc kubenswrapper[4701]: I1121 19:18:39.648487 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="328ebecd-aa3a-4ceb-ab87-ce00ad3dffab" containerName="ovn-config" Nov 21 19:18:39 crc kubenswrapper[4701]: I1121 19:18:39.648683 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="328ebecd-aa3a-4ceb-ab87-ce00ad3dffab" containerName="ovn-config" Nov 21 19:18:39 crc kubenswrapper[4701]: I1121 19:18:39.649960 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-85789dd45c-hpddc" Nov 21 19:18:39 crc kubenswrapper[4701]: I1121 19:18:39.668929 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Nov 21 19:18:39 crc kubenswrapper[4701]: I1121 19:18:39.673385 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-85789dd45c-hpddc"] Nov 21 19:18:39 crc kubenswrapper[4701]: I1121 19:18:39.750974 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef-dns-svc\") pod \"dnsmasq-dns-85789dd45c-hpddc\" (UID: \"e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef\") " pod="openstack/dnsmasq-dns-85789dd45c-hpddc" Nov 21 19:18:39 crc kubenswrapper[4701]: I1121 19:18:39.751167 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef-dns-swift-storage-0\") pod \"dnsmasq-dns-85789dd45c-hpddc\" (UID: \"e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef\") " pod="openstack/dnsmasq-dns-85789dd45c-hpddc" Nov 21 19:18:39 crc kubenswrapper[4701]: I1121 19:18:39.751221 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef-ovsdbserver-nb\") pod \"dnsmasq-dns-85789dd45c-hpddc\" (UID: \"e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef\") " pod="openstack/dnsmasq-dns-85789dd45c-hpddc" Nov 21 19:18:39 crc kubenswrapper[4701]: I1121 19:18:39.751275 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef-config\") pod \"dnsmasq-dns-85789dd45c-hpddc\" (UID: \"e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef\") " pod="openstack/dnsmasq-dns-85789dd45c-hpddc" Nov 21 19:18:39 crc kubenswrapper[4701]: I1121 19:18:39.751303 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fmtvj\" (UniqueName: \"kubernetes.io/projected/e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef-kube-api-access-fmtvj\") pod \"dnsmasq-dns-85789dd45c-hpddc\" (UID: \"e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef\") " pod="openstack/dnsmasq-dns-85789dd45c-hpddc" Nov 21 19:18:39 crc kubenswrapper[4701]: I1121 19:18:39.751335 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef-ovsdbserver-sb\") pod \"dnsmasq-dns-85789dd45c-hpddc\" (UID: \"e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef\") " pod="openstack/dnsmasq-dns-85789dd45c-hpddc" Nov 21 19:18:39 crc kubenswrapper[4701]: I1121 19:18:39.853693 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef-dns-swift-storage-0\") pod \"dnsmasq-dns-85789dd45c-hpddc\" (UID: \"e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef\") " pod="openstack/dnsmasq-dns-85789dd45c-hpddc" Nov 21 19:18:39 crc kubenswrapper[4701]: I1121 19:18:39.853772 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef-ovsdbserver-nb\") pod \"dnsmasq-dns-85789dd45c-hpddc\" (UID: 
\"e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef\") " pod="openstack/dnsmasq-dns-85789dd45c-hpddc" Nov 21 19:18:39 crc kubenswrapper[4701]: I1121 19:18:39.853819 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef-config\") pod \"dnsmasq-dns-85789dd45c-hpddc\" (UID: \"e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef\") " pod="openstack/dnsmasq-dns-85789dd45c-hpddc" Nov 21 19:18:39 crc kubenswrapper[4701]: I1121 19:18:39.853838 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fmtvj\" (UniqueName: \"kubernetes.io/projected/e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef-kube-api-access-fmtvj\") pod \"dnsmasq-dns-85789dd45c-hpddc\" (UID: \"e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef\") " pod="openstack/dnsmasq-dns-85789dd45c-hpddc" Nov 21 19:18:39 crc kubenswrapper[4701]: I1121 19:18:39.853863 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef-ovsdbserver-sb\") pod \"dnsmasq-dns-85789dd45c-hpddc\" (UID: \"e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef\") " pod="openstack/dnsmasq-dns-85789dd45c-hpddc" Nov 21 19:18:39 crc kubenswrapper[4701]: I1121 19:18:39.853901 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef-dns-svc\") pod \"dnsmasq-dns-85789dd45c-hpddc\" (UID: \"e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef\") " pod="openstack/dnsmasq-dns-85789dd45c-hpddc" Nov 21 19:18:39 crc kubenswrapper[4701]: I1121 19:18:39.855236 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef-config\") pod \"dnsmasq-dns-85789dd45c-hpddc\" (UID: \"e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef\") " pod="openstack/dnsmasq-dns-85789dd45c-hpddc" Nov 21 19:18:39 crc kubenswrapper[4701]: I1121 19:18:39.855552 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef-ovsdbserver-nb\") pod \"dnsmasq-dns-85789dd45c-hpddc\" (UID: \"e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef\") " pod="openstack/dnsmasq-dns-85789dd45c-hpddc" Nov 21 19:18:39 crc kubenswrapper[4701]: I1121 19:18:39.855580 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef-dns-swift-storage-0\") pod \"dnsmasq-dns-85789dd45c-hpddc\" (UID: \"e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef\") " pod="openstack/dnsmasq-dns-85789dd45c-hpddc" Nov 21 19:18:39 crc kubenswrapper[4701]: I1121 19:18:39.855668 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef-ovsdbserver-sb\") pod \"dnsmasq-dns-85789dd45c-hpddc\" (UID: \"e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef\") " pod="openstack/dnsmasq-dns-85789dd45c-hpddc" Nov 21 19:18:39 crc kubenswrapper[4701]: I1121 19:18:39.855785 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef-dns-svc\") pod \"dnsmasq-dns-85789dd45c-hpddc\" (UID: \"e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef\") " pod="openstack/dnsmasq-dns-85789dd45c-hpddc" Nov 21 19:18:39 crc kubenswrapper[4701]: 
I1121 19:18:39.876678 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fmtvj\" (UniqueName: \"kubernetes.io/projected/e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef-kube-api-access-fmtvj\") pod \"dnsmasq-dns-85789dd45c-hpddc\" (UID: \"e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef\") " pod="openstack/dnsmasq-dns-85789dd45c-hpddc" Nov 21 19:18:39 crc kubenswrapper[4701]: I1121 19:18:39.970865 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85789dd45c-hpddc" Nov 21 19:18:40 crc kubenswrapper[4701]: I1121 19:18:40.507356 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-85789dd45c-hpddc"] Nov 21 19:18:40 crc kubenswrapper[4701]: W1121 19:18:40.513517 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode90a4e3a_8c3b_44f4_9df1_c36f3aeff9ef.slice/crio-b32dfaf2982e236da1a92d2ff44312e9f6c21ef7487298721daf4eeac6e8d88a WatchSource:0}: Error finding container b32dfaf2982e236da1a92d2ff44312e9f6c21ef7487298721daf4eeac6e8d88a: Status 404 returned error can't find the container with id b32dfaf2982e236da1a92d2ff44312e9f6c21ef7487298721daf4eeac6e8d88a Nov 21 19:18:41 crc kubenswrapper[4701]: I1121 19:18:41.241424 4701 generic.go:334] "Generic (PLEG): container finished" podID="e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef" containerID="23976f19ebe00d7956f6e0886f759c5975d5f1211cd74b2153d838ba0ee0d812" exitCode=0 Nov 21 19:18:41 crc kubenswrapper[4701]: I1121 19:18:41.241765 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85789dd45c-hpddc" event={"ID":"e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef","Type":"ContainerDied","Data":"23976f19ebe00d7956f6e0886f759c5975d5f1211cd74b2153d838ba0ee0d812"} Nov 21 19:18:41 crc kubenswrapper[4701]: I1121 19:18:41.241798 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85789dd45c-hpddc" event={"ID":"e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef","Type":"ContainerStarted","Data":"b32dfaf2982e236da1a92d2ff44312e9f6c21ef7487298721daf4eeac6e8d88a"} Nov 21 19:18:42 crc kubenswrapper[4701]: I1121 19:18:42.255029 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85789dd45c-hpddc" event={"ID":"e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef","Type":"ContainerStarted","Data":"218230f11f6a75f5f8c3f8e1683007a2bc0d9c050f4727d2a8f170029f3651d3"} Nov 21 19:18:42 crc kubenswrapper[4701]: I1121 19:18:42.256443 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-85789dd45c-hpddc" Nov 21 19:18:42 crc kubenswrapper[4701]: I1121 19:18:42.275990 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-85789dd45c-hpddc" podStartSLOduration=3.275962929 podStartE2EDuration="3.275962929s" podCreationTimestamp="2025-11-21 19:18:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:18:42.273556775 +0000 UTC m=+1013.058696812" watchObservedRunningTime="2025-11-21 19:18:42.275962929 +0000 UTC m=+1013.061102956" Nov 21 19:18:42 crc kubenswrapper[4701]: I1121 19:18:42.441114 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 21 19:18:42 crc kubenswrapper[4701]: I1121 19:18:42.442029 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" 
podUID="79fa8bdc-6516-4a53-8e96-17a297ac82b7" containerName="config-reloader" containerID="cri-o://1ffd6b68fea8630375351ce4991f18288751c9de3f864f163aa877b421ce3654" gracePeriod=600 Nov 21 19:18:42 crc kubenswrapper[4701]: I1121 19:18:42.442083 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="79fa8bdc-6516-4a53-8e96-17a297ac82b7" containerName="thanos-sidecar" containerID="cri-o://01a1067b87715b306813bbc60be375c4084fa0a333ffd481e138fd66293d008d" gracePeriod=600 Nov 21 19:18:42 crc kubenswrapper[4701]: I1121 19:18:42.442189 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="79fa8bdc-6516-4a53-8e96-17a297ac82b7" containerName="prometheus" containerID="cri-o://c187480c727f299bcd2020511bfa84e39c898b0ca7849ceb4a8712f49642daf5" gracePeriod=600 Nov 21 19:18:43 crc kubenswrapper[4701]: I1121 19:18:43.271387 4701 generic.go:334] "Generic (PLEG): container finished" podID="79fa8bdc-6516-4a53-8e96-17a297ac82b7" containerID="01a1067b87715b306813bbc60be375c4084fa0a333ffd481e138fd66293d008d" exitCode=0 Nov 21 19:18:43 crc kubenswrapper[4701]: I1121 19:18:43.271828 4701 generic.go:334] "Generic (PLEG): container finished" podID="79fa8bdc-6516-4a53-8e96-17a297ac82b7" containerID="1ffd6b68fea8630375351ce4991f18288751c9de3f864f163aa877b421ce3654" exitCode=0 Nov 21 19:18:43 crc kubenswrapper[4701]: I1121 19:18:43.271850 4701 generic.go:334] "Generic (PLEG): container finished" podID="79fa8bdc-6516-4a53-8e96-17a297ac82b7" containerID="c187480c727f299bcd2020511bfa84e39c898b0ca7849ceb4a8712f49642daf5" exitCode=0 Nov 21 19:18:43 crc kubenswrapper[4701]: I1121 19:18:43.271465 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"79fa8bdc-6516-4a53-8e96-17a297ac82b7","Type":"ContainerDied","Data":"01a1067b87715b306813bbc60be375c4084fa0a333ffd481e138fd66293d008d"} Nov 21 19:18:43 crc kubenswrapper[4701]: I1121 19:18:43.271937 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"79fa8bdc-6516-4a53-8e96-17a297ac82b7","Type":"ContainerDied","Data":"1ffd6b68fea8630375351ce4991f18288751c9de3f864f163aa877b421ce3654"} Nov 21 19:18:43 crc kubenswrapper[4701]: I1121 19:18:43.271952 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"79fa8bdc-6516-4a53-8e96-17a297ac82b7","Type":"ContainerDied","Data":"c187480c727f299bcd2020511bfa84e39c898b0ca7849ceb4a8712f49642daf5"} Nov 21 19:18:43 crc kubenswrapper[4701]: I1121 19:18:43.415383 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:43 crc kubenswrapper[4701]: I1121 19:18:43.547607 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/79fa8bdc-6516-4a53-8e96-17a297ac82b7-thanos-prometheus-http-client-file\") pod \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\" (UID: \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\") " Nov 21 19:18:43 crc kubenswrapper[4701]: I1121 19:18:43.547666 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/79fa8bdc-6516-4a53-8e96-17a297ac82b7-config-out\") pod \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\" (UID: \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\") " Nov 21 19:18:43 crc kubenswrapper[4701]: I1121 19:18:43.547795 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/79fa8bdc-6516-4a53-8e96-17a297ac82b7-prometheus-metric-storage-rulefiles-0\") pod \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\" (UID: \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\") " Nov 21 19:18:43 crc kubenswrapper[4701]: I1121 19:18:43.547833 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/79fa8bdc-6516-4a53-8e96-17a297ac82b7-web-config\") pod \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\" (UID: \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\") " Nov 21 19:18:43 crc kubenswrapper[4701]: I1121 19:18:43.547876 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rc2xm\" (UniqueName: \"kubernetes.io/projected/79fa8bdc-6516-4a53-8e96-17a297ac82b7-kube-api-access-rc2xm\") pod \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\" (UID: \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\") " Nov 21 19:18:43 crc kubenswrapper[4701]: I1121 19:18:43.547903 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/79fa8bdc-6516-4a53-8e96-17a297ac82b7-tls-assets\") pod \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\" (UID: \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\") " Nov 21 19:18:43 crc kubenswrapper[4701]: I1121 19:18:43.547955 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/79fa8bdc-6516-4a53-8e96-17a297ac82b7-config\") pod \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\" (UID: \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\") " Nov 21 19:18:43 crc kubenswrapper[4701]: I1121 19:18:43.548096 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7756673b-01d8-4e24-be57-9b42676a4870\") pod \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\" (UID: \"79fa8bdc-6516-4a53-8e96-17a297ac82b7\") " Nov 21 19:18:43 crc kubenswrapper[4701]: I1121 19:18:43.548885 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/79fa8bdc-6516-4a53-8e96-17a297ac82b7-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "79fa8bdc-6516-4a53-8e96-17a297ac82b7" (UID: "79fa8bdc-6516-4a53-8e96-17a297ac82b7"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:18:43 crc kubenswrapper[4701]: I1121 19:18:43.556477 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79fa8bdc-6516-4a53-8e96-17a297ac82b7-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "79fa8bdc-6516-4a53-8e96-17a297ac82b7" (UID: "79fa8bdc-6516-4a53-8e96-17a297ac82b7"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:18:43 crc kubenswrapper[4701]: I1121 19:18:43.557107 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79fa8bdc-6516-4a53-8e96-17a297ac82b7-config" (OuterVolumeSpecName: "config") pod "79fa8bdc-6516-4a53-8e96-17a297ac82b7" (UID: "79fa8bdc-6516-4a53-8e96-17a297ac82b7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:18:43 crc kubenswrapper[4701]: I1121 19:18:43.557394 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79fa8bdc-6516-4a53-8e96-17a297ac82b7-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "79fa8bdc-6516-4a53-8e96-17a297ac82b7" (UID: "79fa8bdc-6516-4a53-8e96-17a297ac82b7"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:18:43 crc kubenswrapper[4701]: I1121 19:18:43.557663 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79fa8bdc-6516-4a53-8e96-17a297ac82b7-kube-api-access-rc2xm" (OuterVolumeSpecName: "kube-api-access-rc2xm") pod "79fa8bdc-6516-4a53-8e96-17a297ac82b7" (UID: "79fa8bdc-6516-4a53-8e96-17a297ac82b7"). InnerVolumeSpecName "kube-api-access-rc2xm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:18:43 crc kubenswrapper[4701]: I1121 19:18:43.563260 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/79fa8bdc-6516-4a53-8e96-17a297ac82b7-config-out" (OuterVolumeSpecName: "config-out") pod "79fa8bdc-6516-4a53-8e96-17a297ac82b7" (UID: "79fa8bdc-6516-4a53-8e96-17a297ac82b7"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:18:43 crc kubenswrapper[4701]: I1121 19:18:43.576497 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7756673b-01d8-4e24-be57-9b42676a4870" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "79fa8bdc-6516-4a53-8e96-17a297ac82b7" (UID: "79fa8bdc-6516-4a53-8e96-17a297ac82b7"). InnerVolumeSpecName "pvc-7756673b-01d8-4e24-be57-9b42676a4870". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 21 19:18:43 crc kubenswrapper[4701]: I1121 19:18:43.585337 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79fa8bdc-6516-4a53-8e96-17a297ac82b7-web-config" (OuterVolumeSpecName: "web-config") pod "79fa8bdc-6516-4a53-8e96-17a297ac82b7" (UID: "79fa8bdc-6516-4a53-8e96-17a297ac82b7"). InnerVolumeSpecName "web-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:18:43 crc kubenswrapper[4701]: I1121 19:18:43.650665 4701 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/79fa8bdc-6516-4a53-8e96-17a297ac82b7-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:43 crc kubenswrapper[4701]: I1121 19:18:43.650710 4701 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/79fa8bdc-6516-4a53-8e96-17a297ac82b7-config-out\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:43 crc kubenswrapper[4701]: I1121 19:18:43.650724 4701 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/79fa8bdc-6516-4a53-8e96-17a297ac82b7-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:43 crc kubenswrapper[4701]: I1121 19:18:43.650738 4701 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/79fa8bdc-6516-4a53-8e96-17a297ac82b7-web-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:43 crc kubenswrapper[4701]: I1121 19:18:43.650748 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rc2xm\" (UniqueName: \"kubernetes.io/projected/79fa8bdc-6516-4a53-8e96-17a297ac82b7-kube-api-access-rc2xm\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:43 crc kubenswrapper[4701]: I1121 19:18:43.650756 4701 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/79fa8bdc-6516-4a53-8e96-17a297ac82b7-tls-assets\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:43 crc kubenswrapper[4701]: I1121 19:18:43.650765 4701 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/79fa8bdc-6516-4a53-8e96-17a297ac82b7-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:43 crc kubenswrapper[4701]: I1121 19:18:43.650807 4701 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-7756673b-01d8-4e24-be57-9b42676a4870\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7756673b-01d8-4e24-be57-9b42676a4870\") on node \"crc\" " Nov 21 19:18:43 crc kubenswrapper[4701]: I1121 19:18:43.671766 4701 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Nov 21 19:18:43 crc kubenswrapper[4701]: I1121 19:18:43.671909 4701 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-7756673b-01d8-4e24-be57-9b42676a4870" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7756673b-01d8-4e24-be57-9b42676a4870") on node "crc" Nov 21 19:18:43 crc kubenswrapper[4701]: I1121 19:18:43.751935 4701 reconciler_common.go:293] "Volume detached for volume \"pvc-7756673b-01d8-4e24-be57-9b42676a4870\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7756673b-01d8-4e24-be57-9b42676a4870\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.293580 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.294397 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"79fa8bdc-6516-4a53-8e96-17a297ac82b7","Type":"ContainerDied","Data":"2dee2e60062e975f329ac77626d55748ee4ed70fcace3e07fcd9ea83a8eeb5d7"} Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.294495 4701 scope.go:117] "RemoveContainer" containerID="01a1067b87715b306813bbc60be375c4084fa0a333ffd481e138fd66293d008d" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.320804 4701 scope.go:117] "RemoveContainer" containerID="1ffd6b68fea8630375351ce4991f18288751c9de3f864f163aa877b421ce3654" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.343258 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.348491 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.354121 4701 scope.go:117] "RemoveContainer" containerID="c187480c727f299bcd2020511bfa84e39c898b0ca7849ceb4a8712f49642daf5" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.388514 4701 scope.go:117] "RemoveContainer" containerID="f0ca2e7b3ac945a8838678f89fec58d115b4447f42315ce05d64579930fba011" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.406117 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 21 19:18:44 crc kubenswrapper[4701]: E1121 19:18:44.406526 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79fa8bdc-6516-4a53-8e96-17a297ac82b7" containerName="prometheus" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.406541 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="79fa8bdc-6516-4a53-8e96-17a297ac82b7" containerName="prometheus" Nov 21 19:18:44 crc kubenswrapper[4701]: E1121 19:18:44.406556 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79fa8bdc-6516-4a53-8e96-17a297ac82b7" containerName="init-config-reloader" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.406563 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="79fa8bdc-6516-4a53-8e96-17a297ac82b7" containerName="init-config-reloader" Nov 21 19:18:44 crc kubenswrapper[4701]: E1121 19:18:44.406586 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79fa8bdc-6516-4a53-8e96-17a297ac82b7" containerName="config-reloader" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.406595 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="79fa8bdc-6516-4a53-8e96-17a297ac82b7" containerName="config-reloader" Nov 21 19:18:44 crc kubenswrapper[4701]: E1121 19:18:44.406606 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79fa8bdc-6516-4a53-8e96-17a297ac82b7" containerName="thanos-sidecar" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.406611 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="79fa8bdc-6516-4a53-8e96-17a297ac82b7" containerName="thanos-sidecar" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.406776 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="79fa8bdc-6516-4a53-8e96-17a297ac82b7" containerName="config-reloader" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.408327 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="79fa8bdc-6516-4a53-8e96-17a297ac82b7" 
containerName="thanos-sidecar" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.408376 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="79fa8bdc-6516-4a53-8e96-17a297ac82b7" containerName="prometheus" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.413120 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.437226 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.438033 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.438143 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.438324 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.439433 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-p9vfl" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.439642 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.447564 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.556671 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.572779 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-46sxk\" (UniqueName: \"kubernetes.io/projected/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-kube-api-access-46sxk\") pod \"prometheus-metric-storage-0\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.572869 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-config\") pod \"prometheus-metric-storage-0\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.572919 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.572944 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:44 crc 
kubenswrapper[4701]: I1121 19:18:44.572968 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-7756673b-01d8-4e24-be57-9b42676a4870\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7756673b-01d8-4e24-be57-9b42676a4870\") pod \"prometheus-metric-storage-0\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.573007 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.573035 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.573070 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.573099 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.573158 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.573176 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.674744 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-config\") pod \"prometheus-metric-storage-0\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.674815 4701 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.674847 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.674880 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-7756673b-01d8-4e24-be57-9b42676a4870\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7756673b-01d8-4e24-be57-9b42676a4870\") pod \"prometheus-metric-storage-0\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.674907 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.674940 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.674963 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.674990 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.675039 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.675058 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: 
\"kubernetes.io/secret/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.675098 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-46sxk\" (UniqueName: \"kubernetes.io/projected/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-kube-api-access-46sxk\") pod \"prometheus-metric-storage-0\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.675696 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.680947 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.681885 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-config\") pod \"prometheus-metric-storage-0\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.683405 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.684753 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.690756 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.692723 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " pod="openstack/prometheus-metric-storage-0" Nov 
21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.695408 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.695412 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.709928 4701 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.709985 4701 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-7756673b-01d8-4e24-be57-9b42676a4870\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7756673b-01d8-4e24-be57-9b42676a4870\") pod \"prometheus-metric-storage-0\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/cac188d07dbda74642d10d9af8d31d97e15d9a3dab466103b81232fdd62bf350/globalmount\"" pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.722973 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-46sxk\" (UniqueName: \"kubernetes.io/projected/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-kube-api-access-46sxk\") pod \"prometheus-metric-storage-0\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:44 crc kubenswrapper[4701]: I1121 19:18:44.884007 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-7756673b-01d8-4e24-be57-9b42676a4870\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7756673b-01d8-4e24-be57-9b42676a4870\") pod \"prometheus-metric-storage-0\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:45 crc kubenswrapper[4701]: I1121 19:18:45.090311 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 21 19:18:45 crc kubenswrapper[4701]: I1121 19:18:45.658063 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 21 19:18:45 crc kubenswrapper[4701]: W1121 19:18:45.668390 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc7d6de4b_996c_4ea4_a099_b0c98d7cc3ad.slice/crio-d1deff42d4b13a99d4cced8cd0a2bd65559e9c25d780480996b678f094caf1ba WatchSource:0}: Error finding container d1deff42d4b13a99d4cced8cd0a2bd65559e9c25d780480996b678f094caf1ba: Status 404 returned error can't find the container with id d1deff42d4b13a99d4cced8cd0a2bd65559e9c25d780480996b678f094caf1ba Nov 21 19:18:45 crc kubenswrapper[4701]: I1121 19:18:45.961536 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79fa8bdc-6516-4a53-8e96-17a297ac82b7" path="/var/lib/kubelet/pods/79fa8bdc-6516-4a53-8e96-17a297ac82b7/volumes" Nov 21 19:18:46 crc kubenswrapper[4701]: I1121 19:18:46.319592 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad","Type":"ContainerStarted","Data":"d1deff42d4b13a99d4cced8cd0a2bd65559e9c25d780480996b678f094caf1ba"} Nov 21 19:18:46 crc kubenswrapper[4701]: I1121 19:18:46.992749 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-49p6k" Nov 21 19:18:49 crc kubenswrapper[4701]: I1121 19:18:49.357471 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad","Type":"ContainerStarted","Data":"ad988e0923fdfe2974b66f5a9bd3f767da63e391b47281826b2294e70a1007de"} Nov 21 19:18:49 crc kubenswrapper[4701]: I1121 19:18:49.975354 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-85789dd45c-hpddc" Nov 21 19:18:50 crc kubenswrapper[4701]: I1121 19:18:50.074309 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-59d9b597-bpqmn"] Nov 21 19:18:50 crc kubenswrapper[4701]: I1121 19:18:50.074768 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-59d9b597-bpqmn" podUID="7b56d49b-38ad-4fea-9a4b-b6400d7edce9" containerName="dnsmasq-dns" containerID="cri-o://c410626c6c1910a555d22df9b01e01c674f4a44ae932b15e05e7345be65bbe8f" gracePeriod=10 Nov 21 19:18:50 crc kubenswrapper[4701]: I1121 19:18:50.379853 4701 generic.go:334] "Generic (PLEG): container finished" podID="7b56d49b-38ad-4fea-9a4b-b6400d7edce9" containerID="c410626c6c1910a555d22df9b01e01c674f4a44ae932b15e05e7345be65bbe8f" exitCode=0 Nov 21 19:18:50 crc kubenswrapper[4701]: I1121 19:18:50.379975 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59d9b597-bpqmn" event={"ID":"7b56d49b-38ad-4fea-9a4b-b6400d7edce9","Type":"ContainerDied","Data":"c410626c6c1910a555d22df9b01e01c674f4a44ae932b15e05e7345be65bbe8f"} Nov 21 19:18:50 crc kubenswrapper[4701]: I1121 19:18:50.600078 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-59d9b597-bpqmn" Nov 21 19:18:50 crc kubenswrapper[4701]: I1121 19:18:50.709001 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7b56d49b-38ad-4fea-9a4b-b6400d7edce9-dns-svc\") pod \"7b56d49b-38ad-4fea-9a4b-b6400d7edce9\" (UID: \"7b56d49b-38ad-4fea-9a4b-b6400d7edce9\") " Nov 21 19:18:50 crc kubenswrapper[4701]: I1121 19:18:50.709220 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b56d49b-38ad-4fea-9a4b-b6400d7edce9-config\") pod \"7b56d49b-38ad-4fea-9a4b-b6400d7edce9\" (UID: \"7b56d49b-38ad-4fea-9a4b-b6400d7edce9\") " Nov 21 19:18:50 crc kubenswrapper[4701]: I1121 19:18:50.709258 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-znj5f\" (UniqueName: \"kubernetes.io/projected/7b56d49b-38ad-4fea-9a4b-b6400d7edce9-kube-api-access-znj5f\") pod \"7b56d49b-38ad-4fea-9a4b-b6400d7edce9\" (UID: \"7b56d49b-38ad-4fea-9a4b-b6400d7edce9\") " Nov 21 19:18:50 crc kubenswrapper[4701]: I1121 19:18:50.709369 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7b56d49b-38ad-4fea-9a4b-b6400d7edce9-ovsdbserver-sb\") pod \"7b56d49b-38ad-4fea-9a4b-b6400d7edce9\" (UID: \"7b56d49b-38ad-4fea-9a4b-b6400d7edce9\") " Nov 21 19:18:50 crc kubenswrapper[4701]: I1121 19:18:50.709462 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7b56d49b-38ad-4fea-9a4b-b6400d7edce9-ovsdbserver-nb\") pod \"7b56d49b-38ad-4fea-9a4b-b6400d7edce9\" (UID: \"7b56d49b-38ad-4fea-9a4b-b6400d7edce9\") " Nov 21 19:18:50 crc kubenswrapper[4701]: I1121 19:18:50.718747 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b56d49b-38ad-4fea-9a4b-b6400d7edce9-kube-api-access-znj5f" (OuterVolumeSpecName: "kube-api-access-znj5f") pod "7b56d49b-38ad-4fea-9a4b-b6400d7edce9" (UID: "7b56d49b-38ad-4fea-9a4b-b6400d7edce9"). InnerVolumeSpecName "kube-api-access-znj5f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:18:50 crc kubenswrapper[4701]: I1121 19:18:50.756791 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b56d49b-38ad-4fea-9a4b-b6400d7edce9-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7b56d49b-38ad-4fea-9a4b-b6400d7edce9" (UID: "7b56d49b-38ad-4fea-9a4b-b6400d7edce9"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:18:50 crc kubenswrapper[4701]: I1121 19:18:50.765714 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b56d49b-38ad-4fea-9a4b-b6400d7edce9-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "7b56d49b-38ad-4fea-9a4b-b6400d7edce9" (UID: "7b56d49b-38ad-4fea-9a4b-b6400d7edce9"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:18:50 crc kubenswrapper[4701]: I1121 19:18:50.767534 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b56d49b-38ad-4fea-9a4b-b6400d7edce9-config" (OuterVolumeSpecName: "config") pod "7b56d49b-38ad-4fea-9a4b-b6400d7edce9" (UID: "7b56d49b-38ad-4fea-9a4b-b6400d7edce9"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:18:50 crc kubenswrapper[4701]: I1121 19:18:50.770730 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b56d49b-38ad-4fea-9a4b-b6400d7edce9-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "7b56d49b-38ad-4fea-9a4b-b6400d7edce9" (UID: "7b56d49b-38ad-4fea-9a4b-b6400d7edce9"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:18:50 crc kubenswrapper[4701]: I1121 19:18:50.811979 4701 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7b56d49b-38ad-4fea-9a4b-b6400d7edce9-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:50 crc kubenswrapper[4701]: I1121 19:18:50.812036 4701 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b56d49b-38ad-4fea-9a4b-b6400d7edce9-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:50 crc kubenswrapper[4701]: I1121 19:18:50.812047 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-znj5f\" (UniqueName: \"kubernetes.io/projected/7b56d49b-38ad-4fea-9a4b-b6400d7edce9-kube-api-access-znj5f\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:50 crc kubenswrapper[4701]: I1121 19:18:50.812081 4701 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7b56d49b-38ad-4fea-9a4b-b6400d7edce9-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:50 crc kubenswrapper[4701]: I1121 19:18:50.812091 4701 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7b56d49b-38ad-4fea-9a4b-b6400d7edce9-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 21 19:18:51 crc kubenswrapper[4701]: I1121 19:18:51.142771 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.108:5671: connect: connection refused" Nov 21 19:18:51 crc kubenswrapper[4701]: I1121 19:18:51.395000 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59d9b597-bpqmn" event={"ID":"7b56d49b-38ad-4fea-9a4b-b6400d7edce9","Type":"ContainerDied","Data":"3ece02819083326ef3828a84ab469d1885894b15842acc9959b124f3ac26dfcf"} Nov 21 19:18:51 crc kubenswrapper[4701]: I1121 19:18:51.395132 4701 scope.go:117] "RemoveContainer" containerID="c410626c6c1910a555d22df9b01e01c674f4a44ae932b15e05e7345be65bbe8f" Nov 21 19:18:51 crc kubenswrapper[4701]: I1121 19:18:51.396047 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-59d9b597-bpqmn" Nov 21 19:18:51 crc kubenswrapper[4701]: I1121 19:18:51.423619 4701 scope.go:117] "RemoveContainer" containerID="56c7a689e48903eb02263d6981ae3f8bbfabcec4e317bf11c7228f43f30fb6a7" Nov 21 19:18:51 crc kubenswrapper[4701]: I1121 19:18:51.447097 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-59d9b597-bpqmn"] Nov 21 19:18:51 crc kubenswrapper[4701]: I1121 19:18:51.454836 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-59d9b597-bpqmn"] Nov 21 19:18:51 crc kubenswrapper[4701]: I1121 19:18:51.485416 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.109:5671: connect: connection refused" Nov 21 19:18:51 crc kubenswrapper[4701]: I1121 19:18:51.802460 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-notifications-server-0" podUID="fa567817-ce17-4cb3-9e55-e14902a96420" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.110:5671: connect: connection refused" Nov 21 19:18:51 crc kubenswrapper[4701]: I1121 19:18:51.965988 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7b56d49b-38ad-4fea-9a4b-b6400d7edce9" path="/var/lib/kubelet/pods/7b56d49b-38ad-4fea-9a4b-b6400d7edce9/volumes" Nov 21 19:18:55 crc kubenswrapper[4701]: I1121 19:18:55.581868 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-59d9b597-bpqmn" podUID="7b56d49b-38ad-4fea-9a4b-b6400d7edce9" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.125:5353: i/o timeout" Nov 21 19:18:57 crc kubenswrapper[4701]: I1121 19:18:57.475362 4701 generic.go:334] "Generic (PLEG): container finished" podID="c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad" containerID="ad988e0923fdfe2974b66f5a9bd3f767da63e391b47281826b2294e70a1007de" exitCode=0 Nov 21 19:18:57 crc kubenswrapper[4701]: I1121 19:18:57.475474 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad","Type":"ContainerDied","Data":"ad988e0923fdfe2974b66f5a9bd3f767da63e391b47281826b2294e70a1007de"} Nov 21 19:18:58 crc kubenswrapper[4701]: I1121 19:18:58.490444 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad","Type":"ContainerStarted","Data":"47e6feef494f10df8ce7f66b14bbbed3b00134aa95c4b1618d86721b88c1e0ac"} Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.143512 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.465921 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-r44r6"] Nov 21 19:19:01 crc kubenswrapper[4701]: E1121 19:19:01.466434 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b56d49b-38ad-4fea-9a4b-b6400d7edce9" containerName="dnsmasq-dns" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.466457 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b56d49b-38ad-4fea-9a4b-b6400d7edce9" containerName="dnsmasq-dns" Nov 21 19:19:01 crc kubenswrapper[4701]: E1121 19:19:01.466472 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b56d49b-38ad-4fea-9a4b-b6400d7edce9" 
containerName="init" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.466480 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b56d49b-38ad-4fea-9a4b-b6400d7edce9" containerName="init" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.466710 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b56d49b-38ad-4fea-9a4b-b6400d7edce9" containerName="dnsmasq-dns" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.467586 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-r44r6" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.478441 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-r44r6"] Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.485429 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.561731 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-66xr9"] Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.562975 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-66xr9" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.575365 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-66xr9"] Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.583076 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c5ce0222-5363-4b1d-acd2-f8b5a319ad24-operator-scripts\") pod \"cinder-db-create-r44r6\" (UID: \"c5ce0222-5363-4b1d-acd2-f8b5a319ad24\") " pod="openstack/cinder-db-create-r44r6" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.583188 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-46r9v\" (UniqueName: \"kubernetes.io/projected/c5ce0222-5363-4b1d-acd2-f8b5a319ad24-kube-api-access-46r9v\") pod \"cinder-db-create-r44r6\" (UID: \"c5ce0222-5363-4b1d-acd2-f8b5a319ad24\") " pod="openstack/cinder-db-create-r44r6" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.607553 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-3b77-account-create-46vm2"] Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.609113 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-3b77-account-create-46vm2" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.614310 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.638978 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-3b77-account-create-46vm2"] Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.690941 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-46r9v\" (UniqueName: \"kubernetes.io/projected/c5ce0222-5363-4b1d-acd2-f8b5a319ad24-kube-api-access-46r9v\") pod \"cinder-db-create-r44r6\" (UID: \"c5ce0222-5363-4b1d-acd2-f8b5a319ad24\") " pod="openstack/cinder-db-create-r44r6" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.691043 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k8hqc\" (UniqueName: \"kubernetes.io/projected/556faa3b-2540-4344-b293-b68e892e4459-kube-api-access-k8hqc\") pod \"barbican-db-create-66xr9\" (UID: \"556faa3b-2540-4344-b293-b68e892e4459\") " pod="openstack/barbican-db-create-66xr9" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.691106 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/556faa3b-2540-4344-b293-b68e892e4459-operator-scripts\") pod \"barbican-db-create-66xr9\" (UID: \"556faa3b-2540-4344-b293-b68e892e4459\") " pod="openstack/barbican-db-create-66xr9" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.691140 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w49dz\" (UniqueName: \"kubernetes.io/projected/0ca2f6e9-d0f5-44ce-8562-74480c80d847-kube-api-access-w49dz\") pod \"cinder-3b77-account-create-46vm2\" (UID: \"0ca2f6e9-d0f5-44ce-8562-74480c80d847\") " pod="openstack/cinder-3b77-account-create-46vm2" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.691243 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c5ce0222-5363-4b1d-acd2-f8b5a319ad24-operator-scripts\") pod \"cinder-db-create-r44r6\" (UID: \"c5ce0222-5363-4b1d-acd2-f8b5a319ad24\") " pod="openstack/cinder-db-create-r44r6" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.691298 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0ca2f6e9-d0f5-44ce-8562-74480c80d847-operator-scripts\") pod \"cinder-3b77-account-create-46vm2\" (UID: \"0ca2f6e9-d0f5-44ce-8562-74480c80d847\") " pod="openstack/cinder-3b77-account-create-46vm2" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.692631 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c5ce0222-5363-4b1d-acd2-f8b5a319ad24-operator-scripts\") pod \"cinder-db-create-r44r6\" (UID: \"c5ce0222-5363-4b1d-acd2-f8b5a319ad24\") " pod="openstack/cinder-db-create-r44r6" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.702450 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-958c-account-create-dzqch"] Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.704469 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-958c-account-create-dzqch" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.712256 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.722064 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-46r9v\" (UniqueName: \"kubernetes.io/projected/c5ce0222-5363-4b1d-acd2-f8b5a319ad24-kube-api-access-46r9v\") pod \"cinder-db-create-r44r6\" (UID: \"c5ce0222-5363-4b1d-acd2-f8b5a319ad24\") " pod="openstack/cinder-db-create-r44r6" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.735471 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-958c-account-create-dzqch"] Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.790516 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-r44r6" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.793058 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w49dz\" (UniqueName: \"kubernetes.io/projected/0ca2f6e9-d0f5-44ce-8562-74480c80d847-kube-api-access-w49dz\") pod \"cinder-3b77-account-create-46vm2\" (UID: \"0ca2f6e9-d0f5-44ce-8562-74480c80d847\") " pod="openstack/cinder-3b77-account-create-46vm2" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.793167 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/751ba251-758b-4c84-afb8-90205e9cb779-operator-scripts\") pod \"barbican-958c-account-create-dzqch\" (UID: \"751ba251-758b-4c84-afb8-90205e9cb779\") " pod="openstack/barbican-958c-account-create-dzqch" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.793210 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jfhw5\" (UniqueName: \"kubernetes.io/projected/751ba251-758b-4c84-afb8-90205e9cb779-kube-api-access-jfhw5\") pod \"barbican-958c-account-create-dzqch\" (UID: \"751ba251-758b-4c84-afb8-90205e9cb779\") " pod="openstack/barbican-958c-account-create-dzqch" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.793280 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0ca2f6e9-d0f5-44ce-8562-74480c80d847-operator-scripts\") pod \"cinder-3b77-account-create-46vm2\" (UID: \"0ca2f6e9-d0f5-44ce-8562-74480c80d847\") " pod="openstack/cinder-3b77-account-create-46vm2" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.793340 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k8hqc\" (UniqueName: \"kubernetes.io/projected/556faa3b-2540-4344-b293-b68e892e4459-kube-api-access-k8hqc\") pod \"barbican-db-create-66xr9\" (UID: \"556faa3b-2540-4344-b293-b68e892e4459\") " pod="openstack/barbican-db-create-66xr9" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.793384 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/556faa3b-2540-4344-b293-b68e892e4459-operator-scripts\") pod \"barbican-db-create-66xr9\" (UID: \"556faa3b-2540-4344-b293-b68e892e4459\") " pod="openstack/barbican-db-create-66xr9" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.794303 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/556faa3b-2540-4344-b293-b68e892e4459-operator-scripts\") pod \"barbican-db-create-66xr9\" (UID: \"556faa3b-2540-4344-b293-b68e892e4459\") " pod="openstack/barbican-db-create-66xr9" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.794532 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0ca2f6e9-d0f5-44ce-8562-74480c80d847-operator-scripts\") pod \"cinder-3b77-account-create-46vm2\" (UID: \"0ca2f6e9-d0f5-44ce-8562-74480c80d847\") " pod="openstack/cinder-3b77-account-create-46vm2" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.810750 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-notifications-server-0" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.895872 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/751ba251-758b-4c84-afb8-90205e9cb779-operator-scripts\") pod \"barbican-958c-account-create-dzqch\" (UID: \"751ba251-758b-4c84-afb8-90205e9cb779\") " pod="openstack/barbican-958c-account-create-dzqch" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.895941 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jfhw5\" (UniqueName: \"kubernetes.io/projected/751ba251-758b-4c84-afb8-90205e9cb779-kube-api-access-jfhw5\") pod \"barbican-958c-account-create-dzqch\" (UID: \"751ba251-758b-4c84-afb8-90205e9cb779\") " pod="openstack/barbican-958c-account-create-dzqch" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.897241 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/751ba251-758b-4c84-afb8-90205e9cb779-operator-scripts\") pod \"barbican-958c-account-create-dzqch\" (UID: \"751ba251-758b-4c84-afb8-90205e9cb779\") " pod="openstack/barbican-958c-account-create-dzqch" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.947104 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-vkw87"] Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.948541 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-vkw87" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.951091 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-27z5d" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.951282 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.951423 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.954190 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.962977 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-vkw87"] Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.997284 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b122f197-4d99-467c-b60c-c2b7912244ba-config-data\") pod \"keystone-db-sync-vkw87\" (UID: \"b122f197-4d99-467c-b60c-c2b7912244ba\") " pod="openstack/keystone-db-sync-vkw87" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.997390 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gtfbb\" (UniqueName: \"kubernetes.io/projected/b122f197-4d99-467c-b60c-c2b7912244ba-kube-api-access-gtfbb\") pod \"keystone-db-sync-vkw87\" (UID: \"b122f197-4d99-467c-b60c-c2b7912244ba\") " pod="openstack/keystone-db-sync-vkw87" Nov 21 19:19:01 crc kubenswrapper[4701]: I1121 19:19:01.997471 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b122f197-4d99-467c-b60c-c2b7912244ba-combined-ca-bundle\") pod \"keystone-db-sync-vkw87\" (UID: \"b122f197-4d99-467c-b60c-c2b7912244ba\") " pod="openstack/keystone-db-sync-vkw87" Nov 21 19:19:02 crc kubenswrapper[4701]: I1121 19:19:02.002361 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w49dz\" (UniqueName: \"kubernetes.io/projected/0ca2f6e9-d0f5-44ce-8562-74480c80d847-kube-api-access-w49dz\") pod \"cinder-3b77-account-create-46vm2\" (UID: \"0ca2f6e9-d0f5-44ce-8562-74480c80d847\") " pod="openstack/cinder-3b77-account-create-46vm2" Nov 21 19:19:02 crc kubenswrapper[4701]: I1121 19:19:02.006224 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k8hqc\" (UniqueName: \"kubernetes.io/projected/556faa3b-2540-4344-b293-b68e892e4459-kube-api-access-k8hqc\") pod \"barbican-db-create-66xr9\" (UID: \"556faa3b-2540-4344-b293-b68e892e4459\") " pod="openstack/barbican-db-create-66xr9" Nov 21 19:19:02 crc kubenswrapper[4701]: I1121 19:19:02.007392 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jfhw5\" (UniqueName: \"kubernetes.io/projected/751ba251-758b-4c84-afb8-90205e9cb779-kube-api-access-jfhw5\") pod \"barbican-958c-account-create-dzqch\" (UID: \"751ba251-758b-4c84-afb8-90205e9cb779\") " pod="openstack/barbican-958c-account-create-dzqch" Nov 21 19:19:02 crc kubenswrapper[4701]: I1121 19:19:02.064719 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-958c-account-create-dzqch" Nov 21 19:19:02 crc kubenswrapper[4701]: I1121 19:19:02.099861 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b122f197-4d99-467c-b60c-c2b7912244ba-combined-ca-bundle\") pod \"keystone-db-sync-vkw87\" (UID: \"b122f197-4d99-467c-b60c-c2b7912244ba\") " pod="openstack/keystone-db-sync-vkw87" Nov 21 19:19:02 crc kubenswrapper[4701]: I1121 19:19:02.099980 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b122f197-4d99-467c-b60c-c2b7912244ba-config-data\") pod \"keystone-db-sync-vkw87\" (UID: \"b122f197-4d99-467c-b60c-c2b7912244ba\") " pod="openstack/keystone-db-sync-vkw87" Nov 21 19:19:02 crc kubenswrapper[4701]: I1121 19:19:02.100040 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gtfbb\" (UniqueName: \"kubernetes.io/projected/b122f197-4d99-467c-b60c-c2b7912244ba-kube-api-access-gtfbb\") pod \"keystone-db-sync-vkw87\" (UID: \"b122f197-4d99-467c-b60c-c2b7912244ba\") " pod="openstack/keystone-db-sync-vkw87" Nov 21 19:19:02 crc kubenswrapper[4701]: I1121 19:19:02.109084 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b122f197-4d99-467c-b60c-c2b7912244ba-combined-ca-bundle\") pod \"keystone-db-sync-vkw87\" (UID: \"b122f197-4d99-467c-b60c-c2b7912244ba\") " pod="openstack/keystone-db-sync-vkw87" Nov 21 19:19:02 crc kubenswrapper[4701]: I1121 19:19:02.109092 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b122f197-4d99-467c-b60c-c2b7912244ba-config-data\") pod \"keystone-db-sync-vkw87\" (UID: \"b122f197-4d99-467c-b60c-c2b7912244ba\") " pod="openstack/keystone-db-sync-vkw87" Nov 21 19:19:02 crc kubenswrapper[4701]: I1121 19:19:02.121678 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gtfbb\" (UniqueName: \"kubernetes.io/projected/b122f197-4d99-467c-b60c-c2b7912244ba-kube-api-access-gtfbb\") pod \"keystone-db-sync-vkw87\" (UID: \"b122f197-4d99-467c-b60c-c2b7912244ba\") " pod="openstack/keystone-db-sync-vkw87" Nov 21 19:19:02 crc kubenswrapper[4701]: I1121 19:19:02.178716 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-66xr9" Nov 21 19:19:02 crc kubenswrapper[4701]: I1121 19:19:02.208503 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-vkw87" Nov 21 19:19:02 crc kubenswrapper[4701]: I1121 19:19:02.227793 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-3b77-account-create-46vm2" Nov 21 19:19:02 crc kubenswrapper[4701]: I1121 19:19:02.404766 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-r44r6"] Nov 21 19:19:02 crc kubenswrapper[4701]: I1121 19:19:02.533291 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-r44r6" event={"ID":"c5ce0222-5363-4b1d-acd2-f8b5a319ad24","Type":"ContainerStarted","Data":"4e8270c6ca71736b725971701941636bf1b3aba4d8056c56c80dc685637d1518"} Nov 21 19:19:02 crc kubenswrapper[4701]: I1121 19:19:02.685882 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-958c-account-create-dzqch"] Nov 21 19:19:02 crc kubenswrapper[4701]: I1121 19:19:02.827811 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-66xr9"] Nov 21 19:19:02 crc kubenswrapper[4701]: W1121 19:19:02.836186 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod556faa3b_2540_4344_b293_b68e892e4459.slice/crio-3cf1dafd155c2deb7ddc8b5aafd5997d2d934eca910c28ffb16712ab3ae17cf5 WatchSource:0}: Error finding container 3cf1dafd155c2deb7ddc8b5aafd5997d2d934eca910c28ffb16712ab3ae17cf5: Status 404 returned error can't find the container with id 3cf1dafd155c2deb7ddc8b5aafd5997d2d934eca910c28ffb16712ab3ae17cf5 Nov 21 19:19:02 crc kubenswrapper[4701]: I1121 19:19:02.847255 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-vkw87"] Nov 21 19:19:02 crc kubenswrapper[4701]: W1121 19:19:02.854338 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb122f197_4d99_467c_b60c_c2b7912244ba.slice/crio-4e79d78d44eb9462900d2aa4199f41021a896c72fe5788b58fd3ae4c125e00ad WatchSource:0}: Error finding container 4e79d78d44eb9462900d2aa4199f41021a896c72fe5788b58fd3ae4c125e00ad: Status 404 returned error can't find the container with id 4e79d78d44eb9462900d2aa4199f41021a896c72fe5788b58fd3ae4c125e00ad Nov 21 19:19:02 crc kubenswrapper[4701]: W1121 19:19:02.855188 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0ca2f6e9_d0f5_44ce_8562_74480c80d847.slice/crio-6f21c593184fb3cbade536106ef4e981dafeca6f2058f167229764c0e4b28ddb WatchSource:0}: Error finding container 6f21c593184fb3cbade536106ef4e981dafeca6f2058f167229764c0e4b28ddb: Status 404 returned error can't find the container with id 6f21c593184fb3cbade536106ef4e981dafeca6f2058f167229764c0e4b28ddb Nov 21 19:19:02 crc kubenswrapper[4701]: I1121 19:19:02.870289 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-3b77-account-create-46vm2"] Nov 21 19:19:03 crc kubenswrapper[4701]: I1121 19:19:03.554023 4701 generic.go:334] "Generic (PLEG): container finished" podID="c5ce0222-5363-4b1d-acd2-f8b5a319ad24" containerID="b34ed9dcc9a9eeea7f6ea5c9cbd5e400bbc53d6155478d416cb5f6588395fa39" exitCode=0 Nov 21 19:19:03 crc kubenswrapper[4701]: I1121 19:19:03.554107 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-r44r6" event={"ID":"c5ce0222-5363-4b1d-acd2-f8b5a319ad24","Type":"ContainerDied","Data":"b34ed9dcc9a9eeea7f6ea5c9cbd5e400bbc53d6155478d416cb5f6588395fa39"} Nov 21 19:19:03 crc kubenswrapper[4701]: I1121 19:19:03.556985 4701 generic.go:334] "Generic (PLEG): container finished" podID="751ba251-758b-4c84-afb8-90205e9cb779" 
containerID="a80d181868a6b8dd76586059b72078d0c221388e6852ede733d0655453e09a6f" exitCode=0 Nov 21 19:19:03 crc kubenswrapper[4701]: I1121 19:19:03.557047 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-958c-account-create-dzqch" event={"ID":"751ba251-758b-4c84-afb8-90205e9cb779","Type":"ContainerDied","Data":"a80d181868a6b8dd76586059b72078d0c221388e6852ede733d0655453e09a6f"} Nov 21 19:19:03 crc kubenswrapper[4701]: I1121 19:19:03.557073 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-958c-account-create-dzqch" event={"ID":"751ba251-758b-4c84-afb8-90205e9cb779","Type":"ContainerStarted","Data":"aed8747b4f5ff472862eb4e8ef988197fe85574c01ea6a0c9da49dc6ae1ac050"} Nov 21 19:19:03 crc kubenswrapper[4701]: I1121 19:19:03.562884 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad","Type":"ContainerStarted","Data":"15cfac3ccdc40582b6e25ebdc744517c8fb822c06279fd3f7da405da2a54e808"} Nov 21 19:19:03 crc kubenswrapper[4701]: I1121 19:19:03.562930 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad","Type":"ContainerStarted","Data":"0d9514b7787bb00a1f71e72ff6c405d96325360daf4f4a233f12bf84830613c9"} Nov 21 19:19:03 crc kubenswrapper[4701]: I1121 19:19:03.566789 4701 generic.go:334] "Generic (PLEG): container finished" podID="0ca2f6e9-d0f5-44ce-8562-74480c80d847" containerID="ccad98e4a82721a16bf14c894df82fe488525177df96540641c8eb6560eee3b9" exitCode=0 Nov 21 19:19:03 crc kubenswrapper[4701]: I1121 19:19:03.566860 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-3b77-account-create-46vm2" event={"ID":"0ca2f6e9-d0f5-44ce-8562-74480c80d847","Type":"ContainerDied","Data":"ccad98e4a82721a16bf14c894df82fe488525177df96540641c8eb6560eee3b9"} Nov 21 19:19:03 crc kubenswrapper[4701]: I1121 19:19:03.566885 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-3b77-account-create-46vm2" event={"ID":"0ca2f6e9-d0f5-44ce-8562-74480c80d847","Type":"ContainerStarted","Data":"6f21c593184fb3cbade536106ef4e981dafeca6f2058f167229764c0e4b28ddb"} Nov 21 19:19:03 crc kubenswrapper[4701]: I1121 19:19:03.568452 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-vkw87" event={"ID":"b122f197-4d99-467c-b60c-c2b7912244ba","Type":"ContainerStarted","Data":"4e79d78d44eb9462900d2aa4199f41021a896c72fe5788b58fd3ae4c125e00ad"} Nov 21 19:19:03 crc kubenswrapper[4701]: I1121 19:19:03.570713 4701 generic.go:334] "Generic (PLEG): container finished" podID="556faa3b-2540-4344-b293-b68e892e4459" containerID="f54eebddfbea40c49d40447b53ea6b5e0e72b3205a9fc9dd810f3660ce854ec7" exitCode=0 Nov 21 19:19:03 crc kubenswrapper[4701]: I1121 19:19:03.570753 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-66xr9" event={"ID":"556faa3b-2540-4344-b293-b68e892e4459","Type":"ContainerDied","Data":"f54eebddfbea40c49d40447b53ea6b5e0e72b3205a9fc9dd810f3660ce854ec7"} Nov 21 19:19:03 crc kubenswrapper[4701]: I1121 19:19:03.570776 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-66xr9" event={"ID":"556faa3b-2540-4344-b293-b68e892e4459","Type":"ContainerStarted","Data":"3cf1dafd155c2deb7ddc8b5aafd5997d2d934eca910c28ffb16712ab3ae17cf5"} Nov 21 19:19:03 crc kubenswrapper[4701]: I1121 19:19:03.668488 4701 pod_startup_latency_tracker.go:104] "Observed pod 
startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=19.668462861 podStartE2EDuration="19.668462861s" podCreationTimestamp="2025-11-21 19:18:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:19:03.66657916 +0000 UTC m=+1034.451719197" watchObservedRunningTime="2025-11-21 19:19:03.668462861 +0000 UTC m=+1034.453602878" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.224992 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-plj99"] Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.226251 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-plj99" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.244655 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-plj99"] Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.255611 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d6jj5\" (UniqueName: \"kubernetes.io/projected/6adc43bb-8f76-4e28-9afb-7999845bd2ac-kube-api-access-d6jj5\") pod \"glance-db-create-plj99\" (UID: \"6adc43bb-8f76-4e28-9afb-7999845bd2ac\") " pod="openstack/glance-db-create-plj99" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.255679 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6adc43bb-8f76-4e28-9afb-7999845bd2ac-operator-scripts\") pod \"glance-db-create-plj99\" (UID: \"6adc43bb-8f76-4e28-9afb-7999845bd2ac\") " pod="openstack/glance-db-create-plj99" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.292410 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-db-sync-xgwkm"] Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.294019 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-db-sync-xgwkm" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.299497 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-config-data" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.300268 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-watcher-dockercfg-9p59w" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.304302 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-sync-xgwkm"] Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.359003 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hkrsj\" (UniqueName: \"kubernetes.io/projected/98d3b473-8ffd-47bb-a010-65c275226084-kube-api-access-hkrsj\") pod \"watcher-db-sync-xgwkm\" (UID: \"98d3b473-8ffd-47bb-a010-65c275226084\") " pod="openstack/watcher-db-sync-xgwkm" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.359083 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/98d3b473-8ffd-47bb-a010-65c275226084-db-sync-config-data\") pod \"watcher-db-sync-xgwkm\" (UID: \"98d3b473-8ffd-47bb-a010-65c275226084\") " pod="openstack/watcher-db-sync-xgwkm" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.359106 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98d3b473-8ffd-47bb-a010-65c275226084-config-data\") pod \"watcher-db-sync-xgwkm\" (UID: \"98d3b473-8ffd-47bb-a010-65c275226084\") " pod="openstack/watcher-db-sync-xgwkm" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.359534 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d6jj5\" (UniqueName: \"kubernetes.io/projected/6adc43bb-8f76-4e28-9afb-7999845bd2ac-kube-api-access-d6jj5\") pod \"glance-db-create-plj99\" (UID: \"6adc43bb-8f76-4e28-9afb-7999845bd2ac\") " pod="openstack/glance-db-create-plj99" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.359650 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98d3b473-8ffd-47bb-a010-65c275226084-combined-ca-bundle\") pod \"watcher-db-sync-xgwkm\" (UID: \"98d3b473-8ffd-47bb-a010-65c275226084\") " pod="openstack/watcher-db-sync-xgwkm" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.359806 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6adc43bb-8f76-4e28-9afb-7999845bd2ac-operator-scripts\") pod \"glance-db-create-plj99\" (UID: \"6adc43bb-8f76-4e28-9afb-7999845bd2ac\") " pod="openstack/glance-db-create-plj99" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.360762 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6adc43bb-8f76-4e28-9afb-7999845bd2ac-operator-scripts\") pod \"glance-db-create-plj99\" (UID: \"6adc43bb-8f76-4e28-9afb-7999845bd2ac\") " pod="openstack/glance-db-create-plj99" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.372902 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-20ac-account-create-h7hgn"] Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.376323 4701 util.go:30] "No sandbox 
for pod can be found. Need to start a new one" pod="openstack/glance-20ac-account-create-h7hgn" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.378650 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d6jj5\" (UniqueName: \"kubernetes.io/projected/6adc43bb-8f76-4e28-9afb-7999845bd2ac-kube-api-access-d6jj5\") pod \"glance-db-create-plj99\" (UID: \"6adc43bb-8f76-4e28-9afb-7999845bd2ac\") " pod="openstack/glance-db-create-plj99" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.378725 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.396191 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-20ac-account-create-h7hgn"] Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.461293 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cbdec32f-0ec7-4585-a554-aebca2106122-operator-scripts\") pod \"glance-20ac-account-create-h7hgn\" (UID: \"cbdec32f-0ec7-4585-a554-aebca2106122\") " pod="openstack/glance-20ac-account-create-h7hgn" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.461367 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hkrsj\" (UniqueName: \"kubernetes.io/projected/98d3b473-8ffd-47bb-a010-65c275226084-kube-api-access-hkrsj\") pod \"watcher-db-sync-xgwkm\" (UID: \"98d3b473-8ffd-47bb-a010-65c275226084\") " pod="openstack/watcher-db-sync-xgwkm" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.461398 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/98d3b473-8ffd-47bb-a010-65c275226084-db-sync-config-data\") pod \"watcher-db-sync-xgwkm\" (UID: \"98d3b473-8ffd-47bb-a010-65c275226084\") " pod="openstack/watcher-db-sync-xgwkm" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.461425 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98d3b473-8ffd-47bb-a010-65c275226084-config-data\") pod \"watcher-db-sync-xgwkm\" (UID: \"98d3b473-8ffd-47bb-a010-65c275226084\") " pod="openstack/watcher-db-sync-xgwkm" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.461450 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xzp6m\" (UniqueName: \"kubernetes.io/projected/cbdec32f-0ec7-4585-a554-aebca2106122-kube-api-access-xzp6m\") pod \"glance-20ac-account-create-h7hgn\" (UID: \"cbdec32f-0ec7-4585-a554-aebca2106122\") " pod="openstack/glance-20ac-account-create-h7hgn" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.461522 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98d3b473-8ffd-47bb-a010-65c275226084-combined-ca-bundle\") pod \"watcher-db-sync-xgwkm\" (UID: \"98d3b473-8ffd-47bb-a010-65c275226084\") " pod="openstack/watcher-db-sync-xgwkm" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.465897 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98d3b473-8ffd-47bb-a010-65c275226084-combined-ca-bundle\") pod \"watcher-db-sync-xgwkm\" (UID: \"98d3b473-8ffd-47bb-a010-65c275226084\") " pod="openstack/watcher-db-sync-xgwkm" Nov 21 
19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.471510 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/98d3b473-8ffd-47bb-a010-65c275226084-db-sync-config-data\") pod \"watcher-db-sync-xgwkm\" (UID: \"98d3b473-8ffd-47bb-a010-65c275226084\") " pod="openstack/watcher-db-sync-xgwkm" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.472022 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98d3b473-8ffd-47bb-a010-65c275226084-config-data\") pod \"watcher-db-sync-xgwkm\" (UID: \"98d3b473-8ffd-47bb-a010-65c275226084\") " pod="openstack/watcher-db-sync-xgwkm" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.478317 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hkrsj\" (UniqueName: \"kubernetes.io/projected/98d3b473-8ffd-47bb-a010-65c275226084-kube-api-access-hkrsj\") pod \"watcher-db-sync-xgwkm\" (UID: \"98d3b473-8ffd-47bb-a010-65c275226084\") " pod="openstack/watcher-db-sync-xgwkm" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.528817 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-7b44j"] Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.531779 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-7b44j" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.543579 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-plj99" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.553385 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-7b44j"] Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.564876 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xzp6m\" (UniqueName: \"kubernetes.io/projected/cbdec32f-0ec7-4585-a554-aebca2106122-kube-api-access-xzp6m\") pod \"glance-20ac-account-create-h7hgn\" (UID: \"cbdec32f-0ec7-4585-a554-aebca2106122\") " pod="openstack/glance-20ac-account-create-h7hgn" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.564990 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jn9ss\" (UniqueName: \"kubernetes.io/projected/27fa1270-e5a1-467f-90e0-88c263d82c29-kube-api-access-jn9ss\") pod \"neutron-db-create-7b44j\" (UID: \"27fa1270-e5a1-467f-90e0-88c263d82c29\") " pod="openstack/neutron-db-create-7b44j" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.565055 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27fa1270-e5a1-467f-90e0-88c263d82c29-operator-scripts\") pod \"neutron-db-create-7b44j\" (UID: \"27fa1270-e5a1-467f-90e0-88c263d82c29\") " pod="openstack/neutron-db-create-7b44j" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.565096 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cbdec32f-0ec7-4585-a554-aebca2106122-operator-scripts\") pod \"glance-20ac-account-create-h7hgn\" (UID: \"cbdec32f-0ec7-4585-a554-aebca2106122\") " pod="openstack/glance-20ac-account-create-h7hgn" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.565906 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cbdec32f-0ec7-4585-a554-aebca2106122-operator-scripts\") pod \"glance-20ac-account-create-h7hgn\" (UID: \"cbdec32f-0ec7-4585-a554-aebca2106122\") " pod="openstack/glance-20ac-account-create-h7hgn" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.577479 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-3e07-account-create-xh8l9"] Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.579487 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-3e07-account-create-xh8l9" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.583045 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.585618 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-3e07-account-create-xh8l9"] Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.589321 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xzp6m\" (UniqueName: \"kubernetes.io/projected/cbdec32f-0ec7-4585-a554-aebca2106122-kube-api-access-xzp6m\") pod \"glance-20ac-account-create-h7hgn\" (UID: \"cbdec32f-0ec7-4585-a554-aebca2106122\") " pod="openstack/glance-20ac-account-create-h7hgn" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.626259 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-sync-xgwkm" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.667023 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jn9ss\" (UniqueName: \"kubernetes.io/projected/27fa1270-e5a1-467f-90e0-88c263d82c29-kube-api-access-jn9ss\") pod \"neutron-db-create-7b44j\" (UID: \"27fa1270-e5a1-467f-90e0-88c263d82c29\") " pod="openstack/neutron-db-create-7b44j" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.667080 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-79qbv\" (UniqueName: \"kubernetes.io/projected/6f9acdb0-f91b-4cb5-bf36-34b5fcaa7c85-kube-api-access-79qbv\") pod \"neutron-3e07-account-create-xh8l9\" (UID: \"6f9acdb0-f91b-4cb5-bf36-34b5fcaa7c85\") " pod="openstack/neutron-3e07-account-create-xh8l9" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.667156 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27fa1270-e5a1-467f-90e0-88c263d82c29-operator-scripts\") pod \"neutron-db-create-7b44j\" (UID: \"27fa1270-e5a1-467f-90e0-88c263d82c29\") " pod="openstack/neutron-db-create-7b44j" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.667179 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f9acdb0-f91b-4cb5-bf36-34b5fcaa7c85-operator-scripts\") pod \"neutron-3e07-account-create-xh8l9\" (UID: \"6f9acdb0-f91b-4cb5-bf36-34b5fcaa7c85\") " pod="openstack/neutron-3e07-account-create-xh8l9" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.669125 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27fa1270-e5a1-467f-90e0-88c263d82c29-operator-scripts\") pod \"neutron-db-create-7b44j\" (UID: \"27fa1270-e5a1-467f-90e0-88c263d82c29\") " pod="openstack/neutron-db-create-7b44j" Nov 21 19:19:04 crc 
kubenswrapper[4701]: I1121 19:19:04.702227 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jn9ss\" (UniqueName: \"kubernetes.io/projected/27fa1270-e5a1-467f-90e0-88c263d82c29-kube-api-access-jn9ss\") pod \"neutron-db-create-7b44j\" (UID: \"27fa1270-e5a1-467f-90e0-88c263d82c29\") " pod="openstack/neutron-db-create-7b44j" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.746649 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-20ac-account-create-h7hgn" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.773030 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f9acdb0-f91b-4cb5-bf36-34b5fcaa7c85-operator-scripts\") pod \"neutron-3e07-account-create-xh8l9\" (UID: \"6f9acdb0-f91b-4cb5-bf36-34b5fcaa7c85\") " pod="openstack/neutron-3e07-account-create-xh8l9" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.773167 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-79qbv\" (UniqueName: \"kubernetes.io/projected/6f9acdb0-f91b-4cb5-bf36-34b5fcaa7c85-kube-api-access-79qbv\") pod \"neutron-3e07-account-create-xh8l9\" (UID: \"6f9acdb0-f91b-4cb5-bf36-34b5fcaa7c85\") " pod="openstack/neutron-3e07-account-create-xh8l9" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.779943 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f9acdb0-f91b-4cb5-bf36-34b5fcaa7c85-operator-scripts\") pod \"neutron-3e07-account-create-xh8l9\" (UID: \"6f9acdb0-f91b-4cb5-bf36-34b5fcaa7c85\") " pod="openstack/neutron-3e07-account-create-xh8l9" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.830151 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-79qbv\" (UniqueName: \"kubernetes.io/projected/6f9acdb0-f91b-4cb5-bf36-34b5fcaa7c85-kube-api-access-79qbv\") pod \"neutron-3e07-account-create-xh8l9\" (UID: \"6f9acdb0-f91b-4cb5-bf36-34b5fcaa7c85\") " pod="openstack/neutron-3e07-account-create-xh8l9" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.862079 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-7b44j" Nov 21 19:19:04 crc kubenswrapper[4701]: I1121 19:19:04.937368 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-3e07-account-create-xh8l9" Nov 21 19:19:05 crc kubenswrapper[4701]: I1121 19:19:05.094962 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Nov 21 19:19:05 crc kubenswrapper[4701]: I1121 19:19:05.388196 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-plj99"] Nov 21 19:19:05 crc kubenswrapper[4701]: I1121 19:19:05.399648 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-sync-xgwkm"] Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.385328 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-3b77-account-create-46vm2" Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.412141 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-958c-account-create-dzqch" Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.438468 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-r44r6" Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.457368 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-66xr9" Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.562254 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-46r9v\" (UniqueName: \"kubernetes.io/projected/c5ce0222-5363-4b1d-acd2-f8b5a319ad24-kube-api-access-46r9v\") pod \"c5ce0222-5363-4b1d-acd2-f8b5a319ad24\" (UID: \"c5ce0222-5363-4b1d-acd2-f8b5a319ad24\") " Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.562323 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/556faa3b-2540-4344-b293-b68e892e4459-operator-scripts\") pod \"556faa3b-2540-4344-b293-b68e892e4459\" (UID: \"556faa3b-2540-4344-b293-b68e892e4459\") " Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.562396 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jfhw5\" (UniqueName: \"kubernetes.io/projected/751ba251-758b-4c84-afb8-90205e9cb779-kube-api-access-jfhw5\") pod \"751ba251-758b-4c84-afb8-90205e9cb779\" (UID: \"751ba251-758b-4c84-afb8-90205e9cb779\") " Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.562450 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k8hqc\" (UniqueName: \"kubernetes.io/projected/556faa3b-2540-4344-b293-b68e892e4459-kube-api-access-k8hqc\") pod \"556faa3b-2540-4344-b293-b68e892e4459\" (UID: \"556faa3b-2540-4344-b293-b68e892e4459\") " Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.562499 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w49dz\" (UniqueName: \"kubernetes.io/projected/0ca2f6e9-d0f5-44ce-8562-74480c80d847-kube-api-access-w49dz\") pod \"0ca2f6e9-d0f5-44ce-8562-74480c80d847\" (UID: \"0ca2f6e9-d0f5-44ce-8562-74480c80d847\") " Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.562531 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0ca2f6e9-d0f5-44ce-8562-74480c80d847-operator-scripts\") pod \"0ca2f6e9-d0f5-44ce-8562-74480c80d847\" (UID: \"0ca2f6e9-d0f5-44ce-8562-74480c80d847\") " Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.562612 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/751ba251-758b-4c84-afb8-90205e9cb779-operator-scripts\") pod \"751ba251-758b-4c84-afb8-90205e9cb779\" (UID: \"751ba251-758b-4c84-afb8-90205e9cb779\") " Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.562653 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c5ce0222-5363-4b1d-acd2-f8b5a319ad24-operator-scripts\") pod \"c5ce0222-5363-4b1d-acd2-f8b5a319ad24\" (UID: \"c5ce0222-5363-4b1d-acd2-f8b5a319ad24\") " Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.563894 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0ca2f6e9-d0f5-44ce-8562-74480c80d847-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0ca2f6e9-d0f5-44ce-8562-74480c80d847" (UID: "0ca2f6e9-d0f5-44ce-8562-74480c80d847"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.564068 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c5ce0222-5363-4b1d-acd2-f8b5a319ad24-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c5ce0222-5363-4b1d-acd2-f8b5a319ad24" (UID: "c5ce0222-5363-4b1d-acd2-f8b5a319ad24"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.564132 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/751ba251-758b-4c84-afb8-90205e9cb779-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "751ba251-758b-4c84-afb8-90205e9cb779" (UID: "751ba251-758b-4c84-afb8-90205e9cb779"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.564553 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/556faa3b-2540-4344-b293-b68e892e4459-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "556faa3b-2540-4344-b293-b68e892e4459" (UID: "556faa3b-2540-4344-b293-b68e892e4459"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.564818 4701 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0ca2f6e9-d0f5-44ce-8562-74480c80d847-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.564850 4701 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/751ba251-758b-4c84-afb8-90205e9cb779-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.564861 4701 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c5ce0222-5363-4b1d-acd2-f8b5a319ad24-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.564872 4701 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/556faa3b-2540-4344-b293-b68e892e4459-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.570704 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0ca2f6e9-d0f5-44ce-8562-74480c80d847-kube-api-access-w49dz" (OuterVolumeSpecName: "kube-api-access-w49dz") pod "0ca2f6e9-d0f5-44ce-8562-74480c80d847" (UID: "0ca2f6e9-d0f5-44ce-8562-74480c80d847"). InnerVolumeSpecName "kube-api-access-w49dz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.570782 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/556faa3b-2540-4344-b293-b68e892e4459-kube-api-access-k8hqc" (OuterVolumeSpecName: "kube-api-access-k8hqc") pod "556faa3b-2540-4344-b293-b68e892e4459" (UID: "556faa3b-2540-4344-b293-b68e892e4459"). InnerVolumeSpecName "kube-api-access-k8hqc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.570907 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5ce0222-5363-4b1d-acd2-f8b5a319ad24-kube-api-access-46r9v" (OuterVolumeSpecName: "kube-api-access-46r9v") pod "c5ce0222-5363-4b1d-acd2-f8b5a319ad24" (UID: "c5ce0222-5363-4b1d-acd2-f8b5a319ad24"). InnerVolumeSpecName "kube-api-access-46r9v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.573626 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/751ba251-758b-4c84-afb8-90205e9cb779-kube-api-access-jfhw5" (OuterVolumeSpecName: "kube-api-access-jfhw5") pod "751ba251-758b-4c84-afb8-90205e9cb779" (UID: "751ba251-758b-4c84-afb8-90205e9cb779"). InnerVolumeSpecName "kube-api-access-jfhw5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.660777 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-3b77-account-create-46vm2" event={"ID":"0ca2f6e9-d0f5-44ce-8562-74480c80d847","Type":"ContainerDied","Data":"6f21c593184fb3cbade536106ef4e981dafeca6f2058f167229764c0e4b28ddb"} Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.660831 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6f21c593184fb3cbade536106ef4e981dafeca6f2058f167229764c0e4b28ddb" Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.660916 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-3b77-account-create-46vm2" Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.669092 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-46r9v\" (UniqueName: \"kubernetes.io/projected/c5ce0222-5363-4b1d-acd2-f8b5a319ad24-kube-api-access-46r9v\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.669128 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jfhw5\" (UniqueName: \"kubernetes.io/projected/751ba251-758b-4c84-afb8-90205e9cb779-kube-api-access-jfhw5\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.669139 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k8hqc\" (UniqueName: \"kubernetes.io/projected/556faa3b-2540-4344-b293-b68e892e4459-kube-api-access-k8hqc\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.669149 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w49dz\" (UniqueName: \"kubernetes.io/projected/0ca2f6e9-d0f5-44ce-8562-74480c80d847-kube-api-access-w49dz\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.671498 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-xgwkm" event={"ID":"98d3b473-8ffd-47bb-a010-65c275226084","Type":"ContainerStarted","Data":"9a127e7a93f8e1e7122bf7643ebb09f7eb212be5af4ab7d12d346c01dee485e6"} Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.680908 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-plj99" event={"ID":"6adc43bb-8f76-4e28-9afb-7999845bd2ac","Type":"ContainerStarted","Data":"0ddb5156d93074e43856bb42491a9d2f8cd781d7c8295a224a0f73ec4c382dbc"} Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.690325 4701 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/barbican-db-create-66xr9" event={"ID":"556faa3b-2540-4344-b293-b68e892e4459","Type":"ContainerDied","Data":"3cf1dafd155c2deb7ddc8b5aafd5997d2d934eca910c28ffb16712ab3ae17cf5"} Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.690376 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3cf1dafd155c2deb7ddc8b5aafd5997d2d934eca910c28ffb16712ab3ae17cf5" Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.690426 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-66xr9" Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.700175 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-r44r6" event={"ID":"c5ce0222-5363-4b1d-acd2-f8b5a319ad24","Type":"ContainerDied","Data":"4e8270c6ca71736b725971701941636bf1b3aba4d8056c56c80dc685637d1518"} Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.700261 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-r44r6" Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.700290 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4e8270c6ca71736b725971701941636bf1b3aba4d8056c56c80dc685637d1518" Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.704028 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-958c-account-create-dzqch" event={"ID":"751ba251-758b-4c84-afb8-90205e9cb779","Type":"ContainerDied","Data":"aed8747b4f5ff472862eb4e8ef988197fe85574c01ea6a0c9da49dc6ae1ac050"} Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.704054 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aed8747b4f5ff472862eb4e8ef988197fe85574c01ea6a0c9da49dc6ae1ac050" Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.704146 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-958c-account-create-dzqch" Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.706032 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-7b44j"] Nov 21 19:19:08 crc kubenswrapper[4701]: W1121 19:19:08.717693 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod27fa1270_e5a1_467f_90e0_88c263d82c29.slice/crio-1aca92dc65fcc47e2d57a959ce899ef63e3bb56ea848af6997496332aba920b4 WatchSource:0}: Error finding container 1aca92dc65fcc47e2d57a959ce899ef63e3bb56ea848af6997496332aba920b4: Status 404 returned error can't find the container with id 1aca92dc65fcc47e2d57a959ce899ef63e3bb56ea848af6997496332aba920b4 Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.858658 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-20ac-account-create-h7hgn"] Nov 21 19:19:08 crc kubenswrapper[4701]: I1121 19:19:08.865984 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-3e07-account-create-xh8l9"] Nov 21 19:19:09 crc kubenswrapper[4701]: W1121 19:19:09.667566 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcbdec32f_0ec7_4585_a554_aebca2106122.slice/crio-ebba75d27c42bb7549e5b52a1f2d2350979ea4fc8ccc02d9332e95458247ef5f WatchSource:0}: Error finding container ebba75d27c42bb7549e5b52a1f2d2350979ea4fc8ccc02d9332e95458247ef5f: Status 404 returned error can't find the container with id ebba75d27c42bb7549e5b52a1f2d2350979ea4fc8ccc02d9332e95458247ef5f Nov 21 19:19:09 crc kubenswrapper[4701]: W1121 19:19:09.673938 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6f9acdb0_f91b_4cb5_bf36_34b5fcaa7c85.slice/crio-dc276d768cd9617ec3a1236bf5727d6b43e5d5e4589097f22463d81f019b0c2e WatchSource:0}: Error finding container dc276d768cd9617ec3a1236bf5727d6b43e5d5e4589097f22463d81f019b0c2e: Status 404 returned error can't find the container with id dc276d768cd9617ec3a1236bf5727d6b43e5d5e4589097f22463d81f019b0c2e Nov 21 19:19:09 crc kubenswrapper[4701]: I1121 19:19:09.716366 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-plj99" event={"ID":"6adc43bb-8f76-4e28-9afb-7999845bd2ac","Type":"ContainerStarted","Data":"bc0b03336d4f24deaad77d15611a906a0de04779c3825a44e8772d79d4d392c1"} Nov 21 19:19:09 crc kubenswrapper[4701]: I1121 19:19:09.718825 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-vkw87" event={"ID":"b122f197-4d99-467c-b60c-c2b7912244ba","Type":"ContainerStarted","Data":"e415adef03894db4bda0f20c61d5f4e6950090bcfd161f18473e10e6f144a19a"} Nov 21 19:19:09 crc kubenswrapper[4701]: I1121 19:19:09.727318 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-7b44j" event={"ID":"27fa1270-e5a1-467f-90e0-88c263d82c29","Type":"ContainerStarted","Data":"1aca92dc65fcc47e2d57a959ce899ef63e3bb56ea848af6997496332aba920b4"} Nov 21 19:19:09 crc kubenswrapper[4701]: I1121 19:19:09.727429 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-20ac-account-create-h7hgn" event={"ID":"cbdec32f-0ec7-4585-a554-aebca2106122","Type":"ContainerStarted","Data":"ebba75d27c42bb7549e5b52a1f2d2350979ea4fc8ccc02d9332e95458247ef5f"} Nov 21 19:19:09 crc kubenswrapper[4701]: I1121 19:19:09.727450 4701 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack/neutron-3e07-account-create-xh8l9" event={"ID":"6f9acdb0-f91b-4cb5-bf36-34b5fcaa7c85","Type":"ContainerStarted","Data":"dc276d768cd9617ec3a1236bf5727d6b43e5d5e4589097f22463d81f019b0c2e"} Nov 21 19:19:09 crc kubenswrapper[4701]: I1121 19:19:09.748872 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-create-plj99" podStartSLOduration=5.748844617 podStartE2EDuration="5.748844617s" podCreationTimestamp="2025-11-21 19:19:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:19:09.737769341 +0000 UTC m=+1040.522909368" watchObservedRunningTime="2025-11-21 19:19:09.748844617 +0000 UTC m=+1040.533984644" Nov 21 19:19:09 crc kubenswrapper[4701]: I1121 19:19:09.770910 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-vkw87" podStartSLOduration=3.423589177 podStartE2EDuration="8.770891958s" podCreationTimestamp="2025-11-21 19:19:01 +0000 UTC" firstStartedPulling="2025-11-21 19:19:02.873641421 +0000 UTC m=+1033.658781448" lastFinishedPulling="2025-11-21 19:19:08.220944202 +0000 UTC m=+1039.006084229" observedRunningTime="2025-11-21 19:19:09.766917951 +0000 UTC m=+1040.552057968" watchObservedRunningTime="2025-11-21 19:19:09.770891958 +0000 UTC m=+1040.556031985" Nov 21 19:19:10 crc kubenswrapper[4701]: I1121 19:19:10.739007 4701 generic.go:334] "Generic (PLEG): container finished" podID="cbdec32f-0ec7-4585-a554-aebca2106122" containerID="01f74fd3eb413d0e6f8c8eb0e3fb985777af444441e2c4844577070869f4c46e" exitCode=0 Nov 21 19:19:10 crc kubenswrapper[4701]: I1121 19:19:10.739090 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-20ac-account-create-h7hgn" event={"ID":"cbdec32f-0ec7-4585-a554-aebca2106122","Type":"ContainerDied","Data":"01f74fd3eb413d0e6f8c8eb0e3fb985777af444441e2c4844577070869f4c46e"} Nov 21 19:19:10 crc kubenswrapper[4701]: I1121 19:19:10.743847 4701 generic.go:334] "Generic (PLEG): container finished" podID="6f9acdb0-f91b-4cb5-bf36-34b5fcaa7c85" containerID="f4135d9732575126a9da37439373f96b909ae9dc891b76dcc394044243736a52" exitCode=0 Nov 21 19:19:10 crc kubenswrapper[4701]: I1121 19:19:10.743951 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-3e07-account-create-xh8l9" event={"ID":"6f9acdb0-f91b-4cb5-bf36-34b5fcaa7c85","Type":"ContainerDied","Data":"f4135d9732575126a9da37439373f96b909ae9dc891b76dcc394044243736a52"} Nov 21 19:19:10 crc kubenswrapper[4701]: I1121 19:19:10.747816 4701 generic.go:334] "Generic (PLEG): container finished" podID="6adc43bb-8f76-4e28-9afb-7999845bd2ac" containerID="bc0b03336d4f24deaad77d15611a906a0de04779c3825a44e8772d79d4d392c1" exitCode=0 Nov 21 19:19:10 crc kubenswrapper[4701]: I1121 19:19:10.747892 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-plj99" event={"ID":"6adc43bb-8f76-4e28-9afb-7999845bd2ac","Type":"ContainerDied","Data":"bc0b03336d4f24deaad77d15611a906a0de04779c3825a44e8772d79d4d392c1"} Nov 21 19:19:10 crc kubenswrapper[4701]: I1121 19:19:10.752869 4701 generic.go:334] "Generic (PLEG): container finished" podID="27fa1270-e5a1-467f-90e0-88c263d82c29" containerID="af895a35ec1e40b2001b055d1f06610607d627b64fff99653689a60994a3d7a8" exitCode=0 Nov 21 19:19:10 crc kubenswrapper[4701]: I1121 19:19:10.753853 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-7b44j" 
event={"ID":"27fa1270-e5a1-467f-90e0-88c263d82c29","Type":"ContainerDied","Data":"af895a35ec1e40b2001b055d1f06610607d627b64fff99653689a60994a3d7a8"} Nov 21 19:19:13 crc kubenswrapper[4701]: I1121 19:19:13.793476 4701 generic.go:334] "Generic (PLEG): container finished" podID="b122f197-4d99-467c-b60c-c2b7912244ba" containerID="e415adef03894db4bda0f20c61d5f4e6950090bcfd161f18473e10e6f144a19a" exitCode=0 Nov 21 19:19:13 crc kubenswrapper[4701]: I1121 19:19:13.793599 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-vkw87" event={"ID":"b122f197-4d99-467c-b60c-c2b7912244ba","Type":"ContainerDied","Data":"e415adef03894db4bda0f20c61d5f4e6950090bcfd161f18473e10e6f144a19a"} Nov 21 19:19:14 crc kubenswrapper[4701]: I1121 19:19:14.822552 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-3e07-account-create-xh8l9" event={"ID":"6f9acdb0-f91b-4cb5-bf36-34b5fcaa7c85","Type":"ContainerDied","Data":"dc276d768cd9617ec3a1236bf5727d6b43e5d5e4589097f22463d81f019b0c2e"} Nov 21 19:19:14 crc kubenswrapper[4701]: I1121 19:19:14.823125 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dc276d768cd9617ec3a1236bf5727d6b43e5d5e4589097f22463d81f019b0c2e" Nov 21 19:19:14 crc kubenswrapper[4701]: I1121 19:19:14.831296 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-7b44j" event={"ID":"27fa1270-e5a1-467f-90e0-88c263d82c29","Type":"ContainerDied","Data":"1aca92dc65fcc47e2d57a959ce899ef63e3bb56ea848af6997496332aba920b4"} Nov 21 19:19:14 crc kubenswrapper[4701]: I1121 19:19:14.831395 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1aca92dc65fcc47e2d57a959ce899ef63e3bb56ea848af6997496332aba920b4" Nov 21 19:19:14 crc kubenswrapper[4701]: I1121 19:19:14.835404 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-3e07-account-create-xh8l9" Nov 21 19:19:14 crc kubenswrapper[4701]: I1121 19:19:14.846901 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-7b44j" Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.021328 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f9acdb0-f91b-4cb5-bf36-34b5fcaa7c85-operator-scripts\") pod \"6f9acdb0-f91b-4cb5-bf36-34b5fcaa7c85\" (UID: \"6f9acdb0-f91b-4cb5-bf36-34b5fcaa7c85\") " Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.021476 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-79qbv\" (UniqueName: \"kubernetes.io/projected/6f9acdb0-f91b-4cb5-bf36-34b5fcaa7c85-kube-api-access-79qbv\") pod \"6f9acdb0-f91b-4cb5-bf36-34b5fcaa7c85\" (UID: \"6f9acdb0-f91b-4cb5-bf36-34b5fcaa7c85\") " Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.021666 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27fa1270-e5a1-467f-90e0-88c263d82c29-operator-scripts\") pod \"27fa1270-e5a1-467f-90e0-88c263d82c29\" (UID: \"27fa1270-e5a1-467f-90e0-88c263d82c29\") " Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.021835 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6f9acdb0-f91b-4cb5-bf36-34b5fcaa7c85-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6f9acdb0-f91b-4cb5-bf36-34b5fcaa7c85" (UID: "6f9acdb0-f91b-4cb5-bf36-34b5fcaa7c85"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.022135 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/27fa1270-e5a1-467f-90e0-88c263d82c29-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "27fa1270-e5a1-467f-90e0-88c263d82c29" (UID: "27fa1270-e5a1-467f-90e0-88c263d82c29"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.022300 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jn9ss\" (UniqueName: \"kubernetes.io/projected/27fa1270-e5a1-467f-90e0-88c263d82c29-kube-api-access-jn9ss\") pod \"27fa1270-e5a1-467f-90e0-88c263d82c29\" (UID: \"27fa1270-e5a1-467f-90e0-88c263d82c29\") " Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.024405 4701 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f9acdb0-f91b-4cb5-bf36-34b5fcaa7c85-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.024429 4701 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27fa1270-e5a1-467f-90e0-88c263d82c29-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.028446 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/27fa1270-e5a1-467f-90e0-88c263d82c29-kube-api-access-jn9ss" (OuterVolumeSpecName: "kube-api-access-jn9ss") pod "27fa1270-e5a1-467f-90e0-88c263d82c29" (UID: "27fa1270-e5a1-467f-90e0-88c263d82c29"). InnerVolumeSpecName "kube-api-access-jn9ss". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.030419 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f9acdb0-f91b-4cb5-bf36-34b5fcaa7c85-kube-api-access-79qbv" (OuterVolumeSpecName: "kube-api-access-79qbv") pod "6f9acdb0-f91b-4cb5-bf36-34b5fcaa7c85" (UID: "6f9acdb0-f91b-4cb5-bf36-34b5fcaa7c85"). InnerVolumeSpecName "kube-api-access-79qbv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.091054 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.097627 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.129649 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-79qbv\" (UniqueName: \"kubernetes.io/projected/6f9acdb0-f91b-4cb5-bf36-34b5fcaa7c85-kube-api-access-79qbv\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.131457 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jn9ss\" (UniqueName: \"kubernetes.io/projected/27fa1270-e5a1-467f-90e0-88c263d82c29-kube-api-access-jn9ss\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.501651 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-plj99" Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.520087 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-20ac-account-create-h7hgn" Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.520490 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-vkw87" Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.644830 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6jj5\" (UniqueName: \"kubernetes.io/projected/6adc43bb-8f76-4e28-9afb-7999845bd2ac-kube-api-access-d6jj5\") pod \"6adc43bb-8f76-4e28-9afb-7999845bd2ac\" (UID: \"6adc43bb-8f76-4e28-9afb-7999845bd2ac\") " Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.644887 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6adc43bb-8f76-4e28-9afb-7999845bd2ac-operator-scripts\") pod \"6adc43bb-8f76-4e28-9afb-7999845bd2ac\" (UID: \"6adc43bb-8f76-4e28-9afb-7999845bd2ac\") " Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.645004 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b122f197-4d99-467c-b60c-c2b7912244ba-config-data\") pod \"b122f197-4d99-467c-b60c-c2b7912244ba\" (UID: \"b122f197-4d99-467c-b60c-c2b7912244ba\") " Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.645066 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gtfbb\" (UniqueName: \"kubernetes.io/projected/b122f197-4d99-467c-b60c-c2b7912244ba-kube-api-access-gtfbb\") pod \"b122f197-4d99-467c-b60c-c2b7912244ba\" (UID: \"b122f197-4d99-467c-b60c-c2b7912244ba\") " Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.645153 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cbdec32f-0ec7-4585-a554-aebca2106122-operator-scripts\") pod \"cbdec32f-0ec7-4585-a554-aebca2106122\" (UID: \"cbdec32f-0ec7-4585-a554-aebca2106122\") " Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.645225 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xzp6m\" (UniqueName: \"kubernetes.io/projected/cbdec32f-0ec7-4585-a554-aebca2106122-kube-api-access-xzp6m\") pod \"cbdec32f-0ec7-4585-a554-aebca2106122\" (UID: \"cbdec32f-0ec7-4585-a554-aebca2106122\") " Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.645353 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b122f197-4d99-467c-b60c-c2b7912244ba-combined-ca-bundle\") pod \"b122f197-4d99-467c-b60c-c2b7912244ba\" (UID: \"b122f197-4d99-467c-b60c-c2b7912244ba\") " Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.645740 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6adc43bb-8f76-4e28-9afb-7999845bd2ac-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6adc43bb-8f76-4e28-9afb-7999845bd2ac" (UID: "6adc43bb-8f76-4e28-9afb-7999845bd2ac"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.645997 4701 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6adc43bb-8f76-4e28-9afb-7999845bd2ac-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.648930 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cbdec32f-0ec7-4585-a554-aebca2106122-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "cbdec32f-0ec7-4585-a554-aebca2106122" (UID: "cbdec32f-0ec7-4585-a554-aebca2106122"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.648993 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6adc43bb-8f76-4e28-9afb-7999845bd2ac-kube-api-access-d6jj5" (OuterVolumeSpecName: "kube-api-access-d6jj5") pod "6adc43bb-8f76-4e28-9afb-7999845bd2ac" (UID: "6adc43bb-8f76-4e28-9afb-7999845bd2ac"). InnerVolumeSpecName "kube-api-access-d6jj5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.650257 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b122f197-4d99-467c-b60c-c2b7912244ba-kube-api-access-gtfbb" (OuterVolumeSpecName: "kube-api-access-gtfbb") pod "b122f197-4d99-467c-b60c-c2b7912244ba" (UID: "b122f197-4d99-467c-b60c-c2b7912244ba"). InnerVolumeSpecName "kube-api-access-gtfbb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.650556 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cbdec32f-0ec7-4585-a554-aebca2106122-kube-api-access-xzp6m" (OuterVolumeSpecName: "kube-api-access-xzp6m") pod "cbdec32f-0ec7-4585-a554-aebca2106122" (UID: "cbdec32f-0ec7-4585-a554-aebca2106122"). InnerVolumeSpecName "kube-api-access-xzp6m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.677028 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b122f197-4d99-467c-b60c-c2b7912244ba-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b122f197-4d99-467c-b60c-c2b7912244ba" (UID: "b122f197-4d99-467c-b60c-c2b7912244ba"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.705737 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b122f197-4d99-467c-b60c-c2b7912244ba-config-data" (OuterVolumeSpecName: "config-data") pod "b122f197-4d99-467c-b60c-c2b7912244ba" (UID: "b122f197-4d99-467c-b60c-c2b7912244ba"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.748239 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6jj5\" (UniqueName: \"kubernetes.io/projected/6adc43bb-8f76-4e28-9afb-7999845bd2ac-kube-api-access-d6jj5\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.748349 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b122f197-4d99-467c-b60c-c2b7912244ba-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.748371 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gtfbb\" (UniqueName: \"kubernetes.io/projected/b122f197-4d99-467c-b60c-c2b7912244ba-kube-api-access-gtfbb\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.748384 4701 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cbdec32f-0ec7-4585-a554-aebca2106122-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.748397 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xzp6m\" (UniqueName: \"kubernetes.io/projected/cbdec32f-0ec7-4585-a554-aebca2106122-kube-api-access-xzp6m\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.748408 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b122f197-4d99-467c-b60c-c2b7912244ba-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.848447 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-20ac-account-create-h7hgn" event={"ID":"cbdec32f-0ec7-4585-a554-aebca2106122","Type":"ContainerDied","Data":"ebba75d27c42bb7549e5b52a1f2d2350979ea4fc8ccc02d9332e95458247ef5f"} Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.848505 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ebba75d27c42bb7549e5b52a1f2d2350979ea4fc8ccc02d9332e95458247ef5f" Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.848600 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-20ac-account-create-h7hgn" Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.855254 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-xgwkm" event={"ID":"98d3b473-8ffd-47bb-a010-65c275226084","Type":"ContainerStarted","Data":"86fa1db4927122530f5aa1a3ab987e220b706eddd564ec4982f87982eb7f40dc"} Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.862642 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-plj99" event={"ID":"6adc43bb-8f76-4e28-9afb-7999845bd2ac","Type":"ContainerDied","Data":"0ddb5156d93074e43856bb42491a9d2f8cd781d7c8295a224a0f73ec4c382dbc"} Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.862684 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0ddb5156d93074e43856bb42491a9d2f8cd781d7c8295a224a0f73ec4c382dbc" Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.862754 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-plj99" Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.868280 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-3e07-account-create-xh8l9" Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.869699 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-vkw87" Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.883005 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-vkw87" event={"ID":"b122f197-4d99-467c-b60c-c2b7912244ba","Type":"ContainerDied","Data":"4e79d78d44eb9462900d2aa4199f41021a896c72fe5788b58fd3ae4c125e00ad"} Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.883090 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4e79d78d44eb9462900d2aa4199f41021a896c72fe5788b58fd3ae4c125e00ad" Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.883312 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-7b44j" Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.895192 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Nov 21 19:19:15 crc kubenswrapper[4701]: I1121 19:19:15.903418 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-db-sync-xgwkm" podStartSLOduration=4.502409234 podStartE2EDuration="11.903395049s" podCreationTimestamp="2025-11-21 19:19:04 +0000 UTC" firstStartedPulling="2025-11-21 19:19:08.131965371 +0000 UTC m=+1038.917105398" lastFinishedPulling="2025-11-21 19:19:15.532951176 +0000 UTC m=+1046.318091213" observedRunningTime="2025-11-21 19:19:15.884221616 +0000 UTC m=+1046.669361643" watchObservedRunningTime="2025-11-21 19:19:15.903395049 +0000 UTC m=+1046.688535076" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.099163 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-b8v5x"] Nov 21 19:19:16 crc kubenswrapper[4701]: E1121 19:19:16.100593 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="751ba251-758b-4c84-afb8-90205e9cb779" containerName="mariadb-account-create" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.100684 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="751ba251-758b-4c84-afb8-90205e9cb779" containerName="mariadb-account-create" Nov 21 19:19:16 crc kubenswrapper[4701]: E1121 19:19:16.100772 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5ce0222-5363-4b1d-acd2-f8b5a319ad24" containerName="mariadb-database-create" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.100855 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5ce0222-5363-4b1d-acd2-f8b5a319ad24" containerName="mariadb-database-create" Nov 21 19:19:16 crc kubenswrapper[4701]: E1121 19:19:16.100937 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="556faa3b-2540-4344-b293-b68e892e4459" containerName="mariadb-database-create" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.101005 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="556faa3b-2540-4344-b293-b68e892e4459" containerName="mariadb-database-create" Nov 21 19:19:16 crc kubenswrapper[4701]: E1121 19:19:16.101085 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6adc43bb-8f76-4e28-9afb-7999845bd2ac" 
containerName="mariadb-database-create" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.101155 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="6adc43bb-8f76-4e28-9afb-7999845bd2ac" containerName="mariadb-database-create" Nov 21 19:19:16 crc kubenswrapper[4701]: E1121 19:19:16.101243 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27fa1270-e5a1-467f-90e0-88c263d82c29" containerName="mariadb-database-create" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.101315 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="27fa1270-e5a1-467f-90e0-88c263d82c29" containerName="mariadb-database-create" Nov 21 19:19:16 crc kubenswrapper[4701]: E1121 19:19:16.101388 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b122f197-4d99-467c-b60c-c2b7912244ba" containerName="keystone-db-sync" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.101455 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="b122f197-4d99-467c-b60c-c2b7912244ba" containerName="keystone-db-sync" Nov 21 19:19:16 crc kubenswrapper[4701]: E1121 19:19:16.101519 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ca2f6e9-d0f5-44ce-8562-74480c80d847" containerName="mariadb-account-create" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.101576 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ca2f6e9-d0f5-44ce-8562-74480c80d847" containerName="mariadb-account-create" Nov 21 19:19:16 crc kubenswrapper[4701]: E1121 19:19:16.101636 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f9acdb0-f91b-4cb5-bf36-34b5fcaa7c85" containerName="mariadb-account-create" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.101698 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f9acdb0-f91b-4cb5-bf36-34b5fcaa7c85" containerName="mariadb-account-create" Nov 21 19:19:16 crc kubenswrapper[4701]: E1121 19:19:16.101775 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbdec32f-0ec7-4585-a554-aebca2106122" containerName="mariadb-account-create" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.101854 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbdec32f-0ec7-4585-a554-aebca2106122" containerName="mariadb-account-create" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.102229 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="cbdec32f-0ec7-4585-a554-aebca2106122" containerName="mariadb-account-create" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.102306 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="556faa3b-2540-4344-b293-b68e892e4459" containerName="mariadb-database-create" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.102383 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="0ca2f6e9-d0f5-44ce-8562-74480c80d847" containerName="mariadb-account-create" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.102452 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="751ba251-758b-4c84-afb8-90205e9cb779" containerName="mariadb-account-create" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.102511 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="27fa1270-e5a1-467f-90e0-88c263d82c29" containerName="mariadb-database-create" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.102672 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f9acdb0-f91b-4cb5-bf36-34b5fcaa7c85" containerName="mariadb-account-create" Nov 21 
19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.102737 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5ce0222-5363-4b1d-acd2-f8b5a319ad24" containerName="mariadb-database-create" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.102831 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="6adc43bb-8f76-4e28-9afb-7999845bd2ac" containerName="mariadb-database-create" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.102895 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="b122f197-4d99-467c-b60c-c2b7912244ba" containerName="keystone-db-sync" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.103798 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-b8v5x" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.114586 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5d76db8c55-8v4cb"] Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.116721 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d76db8c55-8v4cb" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.121890 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.122125 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.122323 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.122484 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-27z5d" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.122649 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.136599 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-b8v5x"] Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.198188 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5d76db8c55-8v4cb"] Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.266298 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6960ad8e-3a39-4a2f-98cf-448bf96e8362-combined-ca-bundle\") pod \"keystone-bootstrap-b8v5x\" (UID: \"6960ad8e-3a39-4a2f-98cf-448bf96e8362\") " pod="openstack/keystone-bootstrap-b8v5x" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.266348 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c95afb30-2ebc-4223-8b41-155df822aac0-dns-swift-storage-0\") pod \"dnsmasq-dns-5d76db8c55-8v4cb\" (UID: \"c95afb30-2ebc-4223-8b41-155df822aac0\") " pod="openstack/dnsmasq-dns-5d76db8c55-8v4cb" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.266380 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c95afb30-2ebc-4223-8b41-155df822aac0-config\") pod \"dnsmasq-dns-5d76db8c55-8v4cb\" (UID: \"c95afb30-2ebc-4223-8b41-155df822aac0\") " pod="openstack/dnsmasq-dns-5d76db8c55-8v4cb" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 
19:19:16.266476 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c95afb30-2ebc-4223-8b41-155df822aac0-ovsdbserver-nb\") pod \"dnsmasq-dns-5d76db8c55-8v4cb\" (UID: \"c95afb30-2ebc-4223-8b41-155df822aac0\") " pod="openstack/dnsmasq-dns-5d76db8c55-8v4cb" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.266522 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6960ad8e-3a39-4a2f-98cf-448bf96e8362-config-data\") pod \"keystone-bootstrap-b8v5x\" (UID: \"6960ad8e-3a39-4a2f-98cf-448bf96e8362\") " pod="openstack/keystone-bootstrap-b8v5x" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.266541 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqtcn\" (UniqueName: \"kubernetes.io/projected/c95afb30-2ebc-4223-8b41-155df822aac0-kube-api-access-mqtcn\") pod \"dnsmasq-dns-5d76db8c55-8v4cb\" (UID: \"c95afb30-2ebc-4223-8b41-155df822aac0\") " pod="openstack/dnsmasq-dns-5d76db8c55-8v4cb" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.266570 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c95afb30-2ebc-4223-8b41-155df822aac0-ovsdbserver-sb\") pod \"dnsmasq-dns-5d76db8c55-8v4cb\" (UID: \"c95afb30-2ebc-4223-8b41-155df822aac0\") " pod="openstack/dnsmasq-dns-5d76db8c55-8v4cb" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.266602 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wp96x\" (UniqueName: \"kubernetes.io/projected/6960ad8e-3a39-4a2f-98cf-448bf96e8362-kube-api-access-wp96x\") pod \"keystone-bootstrap-b8v5x\" (UID: \"6960ad8e-3a39-4a2f-98cf-448bf96e8362\") " pod="openstack/keystone-bootstrap-b8v5x" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.266638 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6960ad8e-3a39-4a2f-98cf-448bf96e8362-credential-keys\") pod \"keystone-bootstrap-b8v5x\" (UID: \"6960ad8e-3a39-4a2f-98cf-448bf96e8362\") " pod="openstack/keystone-bootstrap-b8v5x" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.266663 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6960ad8e-3a39-4a2f-98cf-448bf96e8362-scripts\") pod \"keystone-bootstrap-b8v5x\" (UID: \"6960ad8e-3a39-4a2f-98cf-448bf96e8362\") " pod="openstack/keystone-bootstrap-b8v5x" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.266711 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6960ad8e-3a39-4a2f-98cf-448bf96e8362-fernet-keys\") pod \"keystone-bootstrap-b8v5x\" (UID: \"6960ad8e-3a39-4a2f-98cf-448bf96e8362\") " pod="openstack/keystone-bootstrap-b8v5x" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.266736 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c95afb30-2ebc-4223-8b41-155df822aac0-dns-svc\") pod \"dnsmasq-dns-5d76db8c55-8v4cb\" (UID: \"c95afb30-2ebc-4223-8b41-155df822aac0\") " 
pod="openstack/dnsmasq-dns-5d76db8c55-8v4cb" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.289452 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-78dcd8d87c-pf5gj"] Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.293995 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-78dcd8d87c-pf5gj" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.300806 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.300876 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.300932 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-qpwgz" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.300968 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.340268 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-78dcd8d87c-pf5gj"] Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.368289 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-69chf"] Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.369917 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-69chf" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.370441 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6960ad8e-3a39-4a2f-98cf-448bf96e8362-fernet-keys\") pod \"keystone-bootstrap-b8v5x\" (UID: \"6960ad8e-3a39-4a2f-98cf-448bf96e8362\") " pod="openstack/keystone-bootstrap-b8v5x" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.370487 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c95afb30-2ebc-4223-8b41-155df822aac0-dns-svc\") pod \"dnsmasq-dns-5d76db8c55-8v4cb\" (UID: \"c95afb30-2ebc-4223-8b41-155df822aac0\") " pod="openstack/dnsmasq-dns-5d76db8c55-8v4cb" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.370518 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6960ad8e-3a39-4a2f-98cf-448bf96e8362-combined-ca-bundle\") pod \"keystone-bootstrap-b8v5x\" (UID: \"6960ad8e-3a39-4a2f-98cf-448bf96e8362\") " pod="openstack/keystone-bootstrap-b8v5x" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.370540 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c95afb30-2ebc-4223-8b41-155df822aac0-dns-swift-storage-0\") pod \"dnsmasq-dns-5d76db8c55-8v4cb\" (UID: \"c95afb30-2ebc-4223-8b41-155df822aac0\") " pod="openstack/dnsmasq-dns-5d76db8c55-8v4cb" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.370570 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c95afb30-2ebc-4223-8b41-155df822aac0-config\") pod \"dnsmasq-dns-5d76db8c55-8v4cb\" (UID: \"c95afb30-2ebc-4223-8b41-155df822aac0\") " pod="openstack/dnsmasq-dns-5d76db8c55-8v4cb" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.370596 4701 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c95afb30-2ebc-4223-8b41-155df822aac0-ovsdbserver-nb\") pod \"dnsmasq-dns-5d76db8c55-8v4cb\" (UID: \"c95afb30-2ebc-4223-8b41-155df822aac0\") " pod="openstack/dnsmasq-dns-5d76db8c55-8v4cb" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.370620 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6960ad8e-3a39-4a2f-98cf-448bf96e8362-config-data\") pod \"keystone-bootstrap-b8v5x\" (UID: \"6960ad8e-3a39-4a2f-98cf-448bf96e8362\") " pod="openstack/keystone-bootstrap-b8v5x" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.370642 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqtcn\" (UniqueName: \"kubernetes.io/projected/c95afb30-2ebc-4223-8b41-155df822aac0-kube-api-access-mqtcn\") pod \"dnsmasq-dns-5d76db8c55-8v4cb\" (UID: \"c95afb30-2ebc-4223-8b41-155df822aac0\") " pod="openstack/dnsmasq-dns-5d76db8c55-8v4cb" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.370670 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c95afb30-2ebc-4223-8b41-155df822aac0-ovsdbserver-sb\") pod \"dnsmasq-dns-5d76db8c55-8v4cb\" (UID: \"c95afb30-2ebc-4223-8b41-155df822aac0\") " pod="openstack/dnsmasq-dns-5d76db8c55-8v4cb" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.370700 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wp96x\" (UniqueName: \"kubernetes.io/projected/6960ad8e-3a39-4a2f-98cf-448bf96e8362-kube-api-access-wp96x\") pod \"keystone-bootstrap-b8v5x\" (UID: \"6960ad8e-3a39-4a2f-98cf-448bf96e8362\") " pod="openstack/keystone-bootstrap-b8v5x" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.370738 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6960ad8e-3a39-4a2f-98cf-448bf96e8362-credential-keys\") pod \"keystone-bootstrap-b8v5x\" (UID: \"6960ad8e-3a39-4a2f-98cf-448bf96e8362\") " pod="openstack/keystone-bootstrap-b8v5x" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.370763 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6960ad8e-3a39-4a2f-98cf-448bf96e8362-scripts\") pod \"keystone-bootstrap-b8v5x\" (UID: \"6960ad8e-3a39-4a2f-98cf-448bf96e8362\") " pod="openstack/keystone-bootstrap-b8v5x" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.372065 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c95afb30-2ebc-4223-8b41-155df822aac0-ovsdbserver-nb\") pod \"dnsmasq-dns-5d76db8c55-8v4cb\" (UID: \"c95afb30-2ebc-4223-8b41-155df822aac0\") " pod="openstack/dnsmasq-dns-5d76db8c55-8v4cb" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.374851 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c95afb30-2ebc-4223-8b41-155df822aac0-dns-swift-storage-0\") pod \"dnsmasq-dns-5d76db8c55-8v4cb\" (UID: \"c95afb30-2ebc-4223-8b41-155df822aac0\") " pod="openstack/dnsmasq-dns-5d76db8c55-8v4cb" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.375829 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/c95afb30-2ebc-4223-8b41-155df822aac0-config\") pod \"dnsmasq-dns-5d76db8c55-8v4cb\" (UID: \"c95afb30-2ebc-4223-8b41-155df822aac0\") " pod="openstack/dnsmasq-dns-5d76db8c55-8v4cb" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.376037 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.377187 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c95afb30-2ebc-4223-8b41-155df822aac0-ovsdbserver-sb\") pod \"dnsmasq-dns-5d76db8c55-8v4cb\" (UID: \"c95afb30-2ebc-4223-8b41-155df822aac0\") " pod="openstack/dnsmasq-dns-5d76db8c55-8v4cb" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.376180 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.383633 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-wzsf6" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.384352 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6960ad8e-3a39-4a2f-98cf-448bf96e8362-fernet-keys\") pod \"keystone-bootstrap-b8v5x\" (UID: \"6960ad8e-3a39-4a2f-98cf-448bf96e8362\") " pod="openstack/keystone-bootstrap-b8v5x" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.384445 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6960ad8e-3a39-4a2f-98cf-448bf96e8362-config-data\") pod \"keystone-bootstrap-b8v5x\" (UID: \"6960ad8e-3a39-4a2f-98cf-448bf96e8362\") " pod="openstack/keystone-bootstrap-b8v5x" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.384660 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c95afb30-2ebc-4223-8b41-155df822aac0-dns-svc\") pod \"dnsmasq-dns-5d76db8c55-8v4cb\" (UID: \"c95afb30-2ebc-4223-8b41-155df822aac0\") " pod="openstack/dnsmasq-dns-5d76db8c55-8v4cb" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.388346 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6960ad8e-3a39-4a2f-98cf-448bf96e8362-credential-keys\") pod \"keystone-bootstrap-b8v5x\" (UID: \"6960ad8e-3a39-4a2f-98cf-448bf96e8362\") " pod="openstack/keystone-bootstrap-b8v5x" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.393349 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-69chf"] Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.409647 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6960ad8e-3a39-4a2f-98cf-448bf96e8362-scripts\") pod \"keystone-bootstrap-b8v5x\" (UID: \"6960ad8e-3a39-4a2f-98cf-448bf96e8362\") " pod="openstack/keystone-bootstrap-b8v5x" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.416051 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6960ad8e-3a39-4a2f-98cf-448bf96e8362-combined-ca-bundle\") pod \"keystone-bootstrap-b8v5x\" (UID: \"6960ad8e-3a39-4a2f-98cf-448bf96e8362\") " pod="openstack/keystone-bootstrap-b8v5x" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.422155 4701 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-mqtcn\" (UniqueName: \"kubernetes.io/projected/c95afb30-2ebc-4223-8b41-155df822aac0-kube-api-access-mqtcn\") pod \"dnsmasq-dns-5d76db8c55-8v4cb\" (UID: \"c95afb30-2ebc-4223-8b41-155df822aac0\") " pod="openstack/dnsmasq-dns-5d76db8c55-8v4cb" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.445312 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wp96x\" (UniqueName: \"kubernetes.io/projected/6960ad8e-3a39-4a2f-98cf-448bf96e8362-kube-api-access-wp96x\") pod \"keystone-bootstrap-b8v5x\" (UID: \"6960ad8e-3a39-4a2f-98cf-448bf96e8362\") " pod="openstack/keystone-bootstrap-b8v5x" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.447815 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-kfsds"] Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.449111 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-kfsds" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.450309 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-b8v5x" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.457848 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d76db8c55-8v4cb" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.462260 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.462660 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.463151 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-hpsk9" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.469952 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.472101 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.474977 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d5b93dd5-e6da-4f02-ac4d-b89773e967d3-scripts\") pod \"cinder-db-sync-69chf\" (UID: \"d5b93dd5-e6da-4f02-ac4d-b89773e967d3\") " pod="openstack/cinder-db-sync-69chf" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.475028 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/efe8d5fc-c754-4243-bf18-af182791a0a4-config-data\") pod \"horizon-78dcd8d87c-pf5gj\" (UID: \"efe8d5fc-c754-4243-bf18-af182791a0a4\") " pod="openstack/horizon-78dcd8d87c-pf5gj" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.475068 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/efe8d5fc-c754-4243-bf18-af182791a0a4-scripts\") pod \"horizon-78dcd8d87c-pf5gj\" (UID: \"efe8d5fc-c754-4243-bf18-af182791a0a4\") " pod="openstack/horizon-78dcd8d87c-pf5gj" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.475101 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d5b93dd5-e6da-4f02-ac4d-b89773e967d3-db-sync-config-data\") pod \"cinder-db-sync-69chf\" (UID: \"d5b93dd5-e6da-4f02-ac4d-b89773e967d3\") " pod="openstack/cinder-db-sync-69chf" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.475146 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vjwr4\" (UniqueName: \"kubernetes.io/projected/d5b93dd5-e6da-4f02-ac4d-b89773e967d3-kube-api-access-vjwr4\") pod \"cinder-db-sync-69chf\" (UID: \"d5b93dd5-e6da-4f02-ac4d-b89773e967d3\") " pod="openstack/cinder-db-sync-69chf" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.475167 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d5b93dd5-e6da-4f02-ac4d-b89773e967d3-etc-machine-id\") pod \"cinder-db-sync-69chf\" (UID: \"d5b93dd5-e6da-4f02-ac4d-b89773e967d3\") " pod="openstack/cinder-db-sync-69chf" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.475187 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5b93dd5-e6da-4f02-ac4d-b89773e967d3-combined-ca-bundle\") pod \"cinder-db-sync-69chf\" (UID: \"d5b93dd5-e6da-4f02-ac4d-b89773e967d3\") " pod="openstack/cinder-db-sync-69chf" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.475225 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/efe8d5fc-c754-4243-bf18-af182791a0a4-horizon-secret-key\") pod \"horizon-78dcd8d87c-pf5gj\" (UID: \"efe8d5fc-c754-4243-bf18-af182791a0a4\") " pod="openstack/horizon-78dcd8d87c-pf5gj" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.475242 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5b93dd5-e6da-4f02-ac4d-b89773e967d3-config-data\") pod \"cinder-db-sync-69chf\" (UID: \"d5b93dd5-e6da-4f02-ac4d-b89773e967d3\") " 
pod="openstack/cinder-db-sync-69chf" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.475272 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/efe8d5fc-c754-4243-bf18-af182791a0a4-logs\") pod \"horizon-78dcd8d87c-pf5gj\" (UID: \"efe8d5fc-c754-4243-bf18-af182791a0a4\") " pod="openstack/horizon-78dcd8d87c-pf5gj" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.475298 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6dfg9\" (UniqueName: \"kubernetes.io/projected/efe8d5fc-c754-4243-bf18-af182791a0a4-kube-api-access-6dfg9\") pod \"horizon-78dcd8d87c-pf5gj\" (UID: \"efe8d5fc-c754-4243-bf18-af182791a0a4\") " pod="openstack/horizon-78dcd8d87c-pf5gj" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.478845 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.487335 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-kfsds"] Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.503739 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.504442 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5d76db8c55-8v4cb"] Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.545164 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.558276 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-rp9lc"] Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.559856 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-rp9lc" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.569267 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-rp9lc"] Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.570129 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.570953 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-g45kf" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.576685 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vjwr4\" (UniqueName: \"kubernetes.io/projected/d5b93dd5-e6da-4f02-ac4d-b89773e967d3-kube-api-access-vjwr4\") pod \"cinder-db-sync-69chf\" (UID: \"d5b93dd5-e6da-4f02-ac4d-b89773e967d3\") " pod="openstack/cinder-db-sync-69chf" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.576729 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d5b93dd5-e6da-4f02-ac4d-b89773e967d3-etc-machine-id\") pod \"cinder-db-sync-69chf\" (UID: \"d5b93dd5-e6da-4f02-ac4d-b89773e967d3\") " pod="openstack/cinder-db-sync-69chf" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.576761 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91518a76-c4e2-4f08-831a-aa8fb9d4778c-combined-ca-bundle\") pod \"placement-db-sync-kfsds\" (UID: \"91518a76-c4e2-4f08-831a-aa8fb9d4778c\") " pod="openstack/placement-db-sync-kfsds" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.576779 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5b93dd5-e6da-4f02-ac4d-b89773e967d3-combined-ca-bundle\") pod \"cinder-db-sync-69chf\" (UID: \"d5b93dd5-e6da-4f02-ac4d-b89773e967d3\") " pod="openstack/cinder-db-sync-69chf" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.576811 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/efe8d5fc-c754-4243-bf18-af182791a0a4-horizon-secret-key\") pod \"horizon-78dcd8d87c-pf5gj\" (UID: \"efe8d5fc-c754-4243-bf18-af182791a0a4\") " pod="openstack/horizon-78dcd8d87c-pf5gj" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.576827 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sr9pz\" (UniqueName: \"kubernetes.io/projected/ffb1df83-0092-42e4-885f-e934786a503b-kube-api-access-sr9pz\") pod \"ceilometer-0\" (UID: \"ffb1df83-0092-42e4-885f-e934786a503b\") " pod="openstack/ceilometer-0" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.576847 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5b93dd5-e6da-4f02-ac4d-b89773e967d3-config-data\") pod \"cinder-db-sync-69chf\" (UID: \"d5b93dd5-e6da-4f02-ac4d-b89773e967d3\") " pod="openstack/cinder-db-sync-69chf" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.576867 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffb1df83-0092-42e4-885f-e934786a503b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: 
\"ffb1df83-0092-42e4-885f-e934786a503b\") " pod="openstack/ceilometer-0" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.576902 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/efe8d5fc-c754-4243-bf18-af182791a0a4-logs\") pod \"horizon-78dcd8d87c-pf5gj\" (UID: \"efe8d5fc-c754-4243-bf18-af182791a0a4\") " pod="openstack/horizon-78dcd8d87c-pf5gj" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.576923 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6dfg9\" (UniqueName: \"kubernetes.io/projected/efe8d5fc-c754-4243-bf18-af182791a0a4-kube-api-access-6dfg9\") pod \"horizon-78dcd8d87c-pf5gj\" (UID: \"efe8d5fc-c754-4243-bf18-af182791a0a4\") " pod="openstack/horizon-78dcd8d87c-pf5gj" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.576949 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91518a76-c4e2-4f08-831a-aa8fb9d4778c-config-data\") pod \"placement-db-sync-kfsds\" (UID: \"91518a76-c4e2-4f08-831a-aa8fb9d4778c\") " pod="openstack/placement-db-sync-kfsds" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.576976 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d5b93dd5-e6da-4f02-ac4d-b89773e967d3-scripts\") pod \"cinder-db-sync-69chf\" (UID: \"d5b93dd5-e6da-4f02-ac4d-b89773e967d3\") " pod="openstack/cinder-db-sync-69chf" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.576991 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/efe8d5fc-c754-4243-bf18-af182791a0a4-config-data\") pod \"horizon-78dcd8d87c-pf5gj\" (UID: \"efe8d5fc-c754-4243-bf18-af182791a0a4\") " pod="openstack/horizon-78dcd8d87c-pf5gj" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.577012 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ffb1df83-0092-42e4-885f-e934786a503b-log-httpd\") pod \"ceilometer-0\" (UID: \"ffb1df83-0092-42e4-885f-e934786a503b\") " pod="openstack/ceilometer-0" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.577033 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gj5sj\" (UniqueName: \"kubernetes.io/projected/91518a76-c4e2-4f08-831a-aa8fb9d4778c-kube-api-access-gj5sj\") pod \"placement-db-sync-kfsds\" (UID: \"91518a76-c4e2-4f08-831a-aa8fb9d4778c\") " pod="openstack/placement-db-sync-kfsds" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.577061 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/efe8d5fc-c754-4243-bf18-af182791a0a4-scripts\") pod \"horizon-78dcd8d87c-pf5gj\" (UID: \"efe8d5fc-c754-4243-bf18-af182791a0a4\") " pod="openstack/horizon-78dcd8d87c-pf5gj" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.577078 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/91518a76-c4e2-4f08-831a-aa8fb9d4778c-logs\") pod \"placement-db-sync-kfsds\" (UID: \"91518a76-c4e2-4f08-831a-aa8fb9d4778c\") " pod="openstack/placement-db-sync-kfsds" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.577105 4701 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ffb1df83-0092-42e4-885f-e934786a503b-scripts\") pod \"ceilometer-0\" (UID: \"ffb1df83-0092-42e4-885f-e934786a503b\") " pod="openstack/ceilometer-0" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.577121 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ffb1df83-0092-42e4-885f-e934786a503b-run-httpd\") pod \"ceilometer-0\" (UID: \"ffb1df83-0092-42e4-885f-e934786a503b\") " pod="openstack/ceilometer-0" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.577137 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d5b93dd5-e6da-4f02-ac4d-b89773e967d3-db-sync-config-data\") pod \"cinder-db-sync-69chf\" (UID: \"d5b93dd5-e6da-4f02-ac4d-b89773e967d3\") " pod="openstack/cinder-db-sync-69chf" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.577155 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffb1df83-0092-42e4-885f-e934786a503b-config-data\") pod \"ceilometer-0\" (UID: \"ffb1df83-0092-42e4-885f-e934786a503b\") " pod="openstack/ceilometer-0" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.577184 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ffb1df83-0092-42e4-885f-e934786a503b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ffb1df83-0092-42e4-885f-e934786a503b\") " pod="openstack/ceilometer-0" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.577214 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/91518a76-c4e2-4f08-831a-aa8fb9d4778c-scripts\") pod \"placement-db-sync-kfsds\" (UID: \"91518a76-c4e2-4f08-831a-aa8fb9d4778c\") " pod="openstack/placement-db-sync-kfsds" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.577623 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d5b93dd5-e6da-4f02-ac4d-b89773e967d3-etc-machine-id\") pod \"cinder-db-sync-69chf\" (UID: \"d5b93dd5-e6da-4f02-ac4d-b89773e967d3\") " pod="openstack/cinder-db-sync-69chf" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.585897 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/efe8d5fc-c754-4243-bf18-af182791a0a4-scripts\") pod \"horizon-78dcd8d87c-pf5gj\" (UID: \"efe8d5fc-c754-4243-bf18-af182791a0a4\") " pod="openstack/horizon-78dcd8d87c-pf5gj" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.586299 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/efe8d5fc-c754-4243-bf18-af182791a0a4-config-data\") pod \"horizon-78dcd8d87c-pf5gj\" (UID: \"efe8d5fc-c754-4243-bf18-af182791a0a4\") " pod="openstack/horizon-78dcd8d87c-pf5gj" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.586878 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/efe8d5fc-c754-4243-bf18-af182791a0a4-logs\") pod \"horizon-78dcd8d87c-pf5gj\" (UID: 
\"efe8d5fc-c754-4243-bf18-af182791a0a4\") " pod="openstack/horizon-78dcd8d87c-pf5gj" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.598835 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/efe8d5fc-c754-4243-bf18-af182791a0a4-horizon-secret-key\") pod \"horizon-78dcd8d87c-pf5gj\" (UID: \"efe8d5fc-c754-4243-bf18-af182791a0a4\") " pod="openstack/horizon-78dcd8d87c-pf5gj" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.601002 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5b93dd5-e6da-4f02-ac4d-b89773e967d3-combined-ca-bundle\") pod \"cinder-db-sync-69chf\" (UID: \"d5b93dd5-e6da-4f02-ac4d-b89773e967d3\") " pod="openstack/cinder-db-sync-69chf" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.601455 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d5b93dd5-e6da-4f02-ac4d-b89773e967d3-scripts\") pod \"cinder-db-sync-69chf\" (UID: \"d5b93dd5-e6da-4f02-ac4d-b89773e967d3\") " pod="openstack/cinder-db-sync-69chf" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.617425 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5bd46686f5-2rps9"] Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.657033 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d5b93dd5-e6da-4f02-ac4d-b89773e967d3-db-sync-config-data\") pod \"cinder-db-sync-69chf\" (UID: \"d5b93dd5-e6da-4f02-ac4d-b89773e967d3\") " pod="openstack/cinder-db-sync-69chf" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.663736 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5b93dd5-e6da-4f02-ac4d-b89773e967d3-config-data\") pod \"cinder-db-sync-69chf\" (UID: \"d5b93dd5-e6da-4f02-ac4d-b89773e967d3\") " pod="openstack/cinder-db-sync-69chf" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.691229 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6dfg9\" (UniqueName: \"kubernetes.io/projected/efe8d5fc-c754-4243-bf18-af182791a0a4-kube-api-access-6dfg9\") pod \"horizon-78dcd8d87c-pf5gj\" (UID: \"efe8d5fc-c754-4243-bf18-af182791a0a4\") " pod="openstack/horizon-78dcd8d87c-pf5gj" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.697006 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5bd46686f5-2rps9" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.715401 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vjwr4\" (UniqueName: \"kubernetes.io/projected/d5b93dd5-e6da-4f02-ac4d-b89773e967d3-kube-api-access-vjwr4\") pod \"cinder-db-sync-69chf\" (UID: \"d5b93dd5-e6da-4f02-ac4d-b89773e967d3\") " pod="openstack/cinder-db-sync-69chf" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.751389 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ffb1df83-0092-42e4-885f-e934786a503b-run-httpd\") pod \"ceilometer-0\" (UID: \"ffb1df83-0092-42e4-885f-e934786a503b\") " pod="openstack/ceilometer-0" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.751472 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffb1df83-0092-42e4-885f-e934786a503b-config-data\") pod \"ceilometer-0\" (UID: \"ffb1df83-0092-42e4-885f-e934786a503b\") " pod="openstack/ceilometer-0" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.752140 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ffb1df83-0092-42e4-885f-e934786a503b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ffb1df83-0092-42e4-885f-e934786a503b\") " pod="openstack/ceilometer-0" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.752755 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/91518a76-c4e2-4f08-831a-aa8fb9d4778c-scripts\") pod \"placement-db-sync-kfsds\" (UID: \"91518a76-c4e2-4f08-831a-aa8fb9d4778c\") " pod="openstack/placement-db-sync-kfsds" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.752906 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91518a76-c4e2-4f08-831a-aa8fb9d4778c-combined-ca-bundle\") pod \"placement-db-sync-kfsds\" (UID: \"91518a76-c4e2-4f08-831a-aa8fb9d4778c\") " pod="openstack/placement-db-sync-kfsds" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.752978 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sr9pz\" (UniqueName: \"kubernetes.io/projected/ffb1df83-0092-42e4-885f-e934786a503b-kube-api-access-sr9pz\") pod \"ceilometer-0\" (UID: \"ffb1df83-0092-42e4-885f-e934786a503b\") " pod="openstack/ceilometer-0" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.753021 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffb1df83-0092-42e4-885f-e934786a503b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ffb1df83-0092-42e4-885f-e934786a503b\") " pod="openstack/ceilometer-0" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.753102 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a93232c-afb8-4ff5-8775-5c3574997149-combined-ca-bundle\") pod \"barbican-db-sync-rp9lc\" (UID: \"0a93232c-afb8-4ff5-8775-5c3574997149\") " pod="openstack/barbican-db-sync-rp9lc" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.753504 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/ffb1df83-0092-42e4-885f-e934786a503b-run-httpd\") pod \"ceilometer-0\" (UID: \"ffb1df83-0092-42e4-885f-e934786a503b\") " pod="openstack/ceilometer-0" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.753691 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91518a76-c4e2-4f08-831a-aa8fb9d4778c-config-data\") pod \"placement-db-sync-kfsds\" (UID: \"91518a76-c4e2-4f08-831a-aa8fb9d4778c\") " pod="openstack/placement-db-sync-kfsds" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.753777 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hkth4\" (UniqueName: \"kubernetes.io/projected/0a93232c-afb8-4ff5-8775-5c3574997149-kube-api-access-hkth4\") pod \"barbican-db-sync-rp9lc\" (UID: \"0a93232c-afb8-4ff5-8775-5c3574997149\") " pod="openstack/barbican-db-sync-rp9lc" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.753927 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ffb1df83-0092-42e4-885f-e934786a503b-log-httpd\") pod \"ceilometer-0\" (UID: \"ffb1df83-0092-42e4-885f-e934786a503b\") " pod="openstack/ceilometer-0" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.757414 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gj5sj\" (UniqueName: \"kubernetes.io/projected/91518a76-c4e2-4f08-831a-aa8fb9d4778c-kube-api-access-gj5sj\") pod \"placement-db-sync-kfsds\" (UID: \"91518a76-c4e2-4f08-831a-aa8fb9d4778c\") " pod="openstack/placement-db-sync-kfsds" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.757564 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/91518a76-c4e2-4f08-831a-aa8fb9d4778c-logs\") pod \"placement-db-sync-kfsds\" (UID: \"91518a76-c4e2-4f08-831a-aa8fb9d4778c\") " pod="openstack/placement-db-sync-kfsds" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.757668 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0a93232c-afb8-4ff5-8775-5c3574997149-db-sync-config-data\") pod \"barbican-db-sync-rp9lc\" (UID: \"0a93232c-afb8-4ff5-8775-5c3574997149\") " pod="openstack/barbican-db-sync-rp9lc" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.757711 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ffb1df83-0092-42e4-885f-e934786a503b-scripts\") pod \"ceilometer-0\" (UID: \"ffb1df83-0092-42e4-885f-e934786a503b\") " pod="openstack/ceilometer-0" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.755572 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ffb1df83-0092-42e4-885f-e934786a503b-log-httpd\") pod \"ceilometer-0\" (UID: \"ffb1df83-0092-42e4-885f-e934786a503b\") " pod="openstack/ceilometer-0" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.763895 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/91518a76-c4e2-4f08-831a-aa8fb9d4778c-logs\") pod \"placement-db-sync-kfsds\" (UID: \"91518a76-c4e2-4f08-831a-aa8fb9d4778c\") " pod="openstack/placement-db-sync-kfsds" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.800151 
4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ffb1df83-0092-42e4-885f-e934786a503b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ffb1df83-0092-42e4-885f-e934786a503b\") " pod="openstack/ceilometer-0" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.801684 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffb1df83-0092-42e4-885f-e934786a503b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ffb1df83-0092-42e4-885f-e934786a503b\") " pod="openstack/ceilometer-0" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.801849 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91518a76-c4e2-4f08-831a-aa8fb9d4778c-combined-ca-bundle\") pod \"placement-db-sync-kfsds\" (UID: \"91518a76-c4e2-4f08-831a-aa8fb9d4778c\") " pod="openstack/placement-db-sync-kfsds" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.802063 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/91518a76-c4e2-4f08-831a-aa8fb9d4778c-scripts\") pod \"placement-db-sync-kfsds\" (UID: \"91518a76-c4e2-4f08-831a-aa8fb9d4778c\") " pod="openstack/placement-db-sync-kfsds" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.802470 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91518a76-c4e2-4f08-831a-aa8fb9d4778c-config-data\") pod \"placement-db-sync-kfsds\" (UID: \"91518a76-c4e2-4f08-831a-aa8fb9d4778c\") " pod="openstack/placement-db-sync-kfsds" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.803293 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffb1df83-0092-42e4-885f-e934786a503b-config-data\") pod \"ceilometer-0\" (UID: \"ffb1df83-0092-42e4-885f-e934786a503b\") " pod="openstack/ceilometer-0" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.804276 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5bd46686f5-2rps9"] Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.812779 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gj5sj\" (UniqueName: \"kubernetes.io/projected/91518a76-c4e2-4f08-831a-aa8fb9d4778c-kube-api-access-gj5sj\") pod \"placement-db-sync-kfsds\" (UID: \"91518a76-c4e2-4f08-831a-aa8fb9d4778c\") " pod="openstack/placement-db-sync-kfsds" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.813963 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sr9pz\" (UniqueName: \"kubernetes.io/projected/ffb1df83-0092-42e4-885f-e934786a503b-kube-api-access-sr9pz\") pod \"ceilometer-0\" (UID: \"ffb1df83-0092-42e4-885f-e934786a503b\") " pod="openstack/ceilometer-0" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.816481 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ffb1df83-0092-42e4-885f-e934786a503b-scripts\") pod \"ceilometer-0\" (UID: \"ffb1df83-0092-42e4-885f-e934786a503b\") " pod="openstack/ceilometer-0" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.835973 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-565b678645-7jbt7"] Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.838597 4701 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-565b678645-7jbt7" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.843563 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-565b678645-7jbt7"] Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.860300 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ae589118-c566-4aea-9183-6c67706aee1e-ovsdbserver-nb\") pod \"dnsmasq-dns-5bd46686f5-2rps9\" (UID: \"ae589118-c566-4aea-9183-6c67706aee1e\") " pod="openstack/dnsmasq-dns-5bd46686f5-2rps9" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.860362 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0a93232c-afb8-4ff5-8775-5c3574997149-db-sync-config-data\") pod \"barbican-db-sync-rp9lc\" (UID: \"0a93232c-afb8-4ff5-8775-5c3574997149\") " pod="openstack/barbican-db-sync-rp9lc" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.860452 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ae589118-c566-4aea-9183-6c67706aee1e-dns-svc\") pod \"dnsmasq-dns-5bd46686f5-2rps9\" (UID: \"ae589118-c566-4aea-9183-6c67706aee1e\") " pod="openstack/dnsmasq-dns-5bd46686f5-2rps9" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.860472 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ae589118-c566-4aea-9183-6c67706aee1e-ovsdbserver-sb\") pod \"dnsmasq-dns-5bd46686f5-2rps9\" (UID: \"ae589118-c566-4aea-9183-6c67706aee1e\") " pod="openstack/dnsmasq-dns-5bd46686f5-2rps9" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.860555 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ae589118-c566-4aea-9183-6c67706aee1e-dns-swift-storage-0\") pod \"dnsmasq-dns-5bd46686f5-2rps9\" (UID: \"ae589118-c566-4aea-9183-6c67706aee1e\") " pod="openstack/dnsmasq-dns-5bd46686f5-2rps9" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.860583 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae589118-c566-4aea-9183-6c67706aee1e-config\") pod \"dnsmasq-dns-5bd46686f5-2rps9\" (UID: \"ae589118-c566-4aea-9183-6c67706aee1e\") " pod="openstack/dnsmasq-dns-5bd46686f5-2rps9" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.860602 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a93232c-afb8-4ff5-8775-5c3574997149-combined-ca-bundle\") pod \"barbican-db-sync-rp9lc\" (UID: \"0a93232c-afb8-4ff5-8775-5c3574997149\") " pod="openstack/barbican-db-sync-rp9lc" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.860649 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-78dds\" (UniqueName: \"kubernetes.io/projected/ae589118-c566-4aea-9183-6c67706aee1e-kube-api-access-78dds\") pod \"dnsmasq-dns-5bd46686f5-2rps9\" (UID: \"ae589118-c566-4aea-9183-6c67706aee1e\") " pod="openstack/dnsmasq-dns-5bd46686f5-2rps9" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.860677 4701 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hkth4\" (UniqueName: \"kubernetes.io/projected/0a93232c-afb8-4ff5-8775-5c3574997149-kube-api-access-hkth4\") pod \"barbican-db-sync-rp9lc\" (UID: \"0a93232c-afb8-4ff5-8775-5c3574997149\") " pod="openstack/barbican-db-sync-rp9lc" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.864294 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0a93232c-afb8-4ff5-8775-5c3574997149-db-sync-config-data\") pod \"barbican-db-sync-rp9lc\" (UID: \"0a93232c-afb8-4ff5-8775-5c3574997149\") " pod="openstack/barbican-db-sync-rp9lc" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.874244 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a93232c-afb8-4ff5-8775-5c3574997149-combined-ca-bundle\") pod \"barbican-db-sync-rp9lc\" (UID: \"0a93232c-afb8-4ff5-8775-5c3574997149\") " pod="openstack/barbican-db-sync-rp9lc" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.894664 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hkth4\" (UniqueName: \"kubernetes.io/projected/0a93232c-afb8-4ff5-8775-5c3574997149-kube-api-access-hkth4\") pod \"barbican-db-sync-rp9lc\" (UID: \"0a93232c-afb8-4ff5-8775-5c3574997149\") " pod="openstack/barbican-db-sync-rp9lc" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.921038 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-78dcd8d87c-pf5gj" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.922289 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.938260 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-rp9lc" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.962622 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0-horizon-secret-key\") pod \"horizon-565b678645-7jbt7\" (UID: \"e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0\") " pod="openstack/horizon-565b678645-7jbt7" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.962754 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ae589118-c566-4aea-9183-6c67706aee1e-dns-svc\") pod \"dnsmasq-dns-5bd46686f5-2rps9\" (UID: \"ae589118-c566-4aea-9183-6c67706aee1e\") " pod="openstack/dnsmasq-dns-5bd46686f5-2rps9" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.962778 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ae589118-c566-4aea-9183-6c67706aee1e-ovsdbserver-sb\") pod \"dnsmasq-dns-5bd46686f5-2rps9\" (UID: \"ae589118-c566-4aea-9183-6c67706aee1e\") " pod="openstack/dnsmasq-dns-5bd46686f5-2rps9" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.962852 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0-logs\") pod \"horizon-565b678645-7jbt7\" (UID: \"e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0\") " pod="openstack/horizon-565b678645-7jbt7" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.962891 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ae589118-c566-4aea-9183-6c67706aee1e-dns-swift-storage-0\") pod \"dnsmasq-dns-5bd46686f5-2rps9\" (UID: \"ae589118-c566-4aea-9183-6c67706aee1e\") " pod="openstack/dnsmasq-dns-5bd46686f5-2rps9" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.962923 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0-config-data\") pod \"horizon-565b678645-7jbt7\" (UID: \"e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0\") " pod="openstack/horizon-565b678645-7jbt7" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.962949 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae589118-c566-4aea-9183-6c67706aee1e-config\") pod \"dnsmasq-dns-5bd46686f5-2rps9\" (UID: \"ae589118-c566-4aea-9183-6c67706aee1e\") " pod="openstack/dnsmasq-dns-5bd46686f5-2rps9" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.963054 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-78dds\" (UniqueName: \"kubernetes.io/projected/ae589118-c566-4aea-9183-6c67706aee1e-kube-api-access-78dds\") pod \"dnsmasq-dns-5bd46686f5-2rps9\" (UID: \"ae589118-c566-4aea-9183-6c67706aee1e\") " pod="openstack/dnsmasq-dns-5bd46686f5-2rps9" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.963148 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ae589118-c566-4aea-9183-6c67706aee1e-ovsdbserver-nb\") pod \"dnsmasq-dns-5bd46686f5-2rps9\" (UID: \"ae589118-c566-4aea-9183-6c67706aee1e\") " 
pod="openstack/dnsmasq-dns-5bd46686f5-2rps9" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.963191 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0-scripts\") pod \"horizon-565b678645-7jbt7\" (UID: \"e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0\") " pod="openstack/horizon-565b678645-7jbt7" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.963232 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bnbp2\" (UniqueName: \"kubernetes.io/projected/e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0-kube-api-access-bnbp2\") pod \"horizon-565b678645-7jbt7\" (UID: \"e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0\") " pod="openstack/horizon-565b678645-7jbt7" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.964527 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ae589118-c566-4aea-9183-6c67706aee1e-dns-swift-storage-0\") pod \"dnsmasq-dns-5bd46686f5-2rps9\" (UID: \"ae589118-c566-4aea-9183-6c67706aee1e\") " pod="openstack/dnsmasq-dns-5bd46686f5-2rps9" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.965376 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ae589118-c566-4aea-9183-6c67706aee1e-dns-svc\") pod \"dnsmasq-dns-5bd46686f5-2rps9\" (UID: \"ae589118-c566-4aea-9183-6c67706aee1e\") " pod="openstack/dnsmasq-dns-5bd46686f5-2rps9" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.966284 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae589118-c566-4aea-9183-6c67706aee1e-config\") pod \"dnsmasq-dns-5bd46686f5-2rps9\" (UID: \"ae589118-c566-4aea-9183-6c67706aee1e\") " pod="openstack/dnsmasq-dns-5bd46686f5-2rps9" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.966296 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ae589118-c566-4aea-9183-6c67706aee1e-ovsdbserver-nb\") pod \"dnsmasq-dns-5bd46686f5-2rps9\" (UID: \"ae589118-c566-4aea-9183-6c67706aee1e\") " pod="openstack/dnsmasq-dns-5bd46686f5-2rps9" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.967018 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ae589118-c566-4aea-9183-6c67706aee1e-ovsdbserver-sb\") pod \"dnsmasq-dns-5bd46686f5-2rps9\" (UID: \"ae589118-c566-4aea-9183-6c67706aee1e\") " pod="openstack/dnsmasq-dns-5bd46686f5-2rps9" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.978378 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-69chf" Nov 21 19:19:16 crc kubenswrapper[4701]: I1121 19:19:16.991910 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-78dds\" (UniqueName: \"kubernetes.io/projected/ae589118-c566-4aea-9183-6c67706aee1e-kube-api-access-78dds\") pod \"dnsmasq-dns-5bd46686f5-2rps9\" (UID: \"ae589118-c566-4aea-9183-6c67706aee1e\") " pod="openstack/dnsmasq-dns-5bd46686f5-2rps9" Nov 21 19:19:17 crc kubenswrapper[4701]: I1121 19:19:17.009187 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-kfsds" Nov 21 19:19:17 crc kubenswrapper[4701]: I1121 19:19:17.074871 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0-scripts\") pod \"horizon-565b678645-7jbt7\" (UID: \"e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0\") " pod="openstack/horizon-565b678645-7jbt7" Nov 21 19:19:17 crc kubenswrapper[4701]: I1121 19:19:17.074933 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bnbp2\" (UniqueName: \"kubernetes.io/projected/e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0-kube-api-access-bnbp2\") pod \"horizon-565b678645-7jbt7\" (UID: \"e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0\") " pod="openstack/horizon-565b678645-7jbt7" Nov 21 19:19:17 crc kubenswrapper[4701]: I1121 19:19:17.074990 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0-horizon-secret-key\") pod \"horizon-565b678645-7jbt7\" (UID: \"e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0\") " pod="openstack/horizon-565b678645-7jbt7" Nov 21 19:19:17 crc kubenswrapper[4701]: I1121 19:19:17.075139 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0-logs\") pod \"horizon-565b678645-7jbt7\" (UID: \"e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0\") " pod="openstack/horizon-565b678645-7jbt7" Nov 21 19:19:17 crc kubenswrapper[4701]: I1121 19:19:17.075213 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0-config-data\") pod \"horizon-565b678645-7jbt7\" (UID: \"e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0\") " pod="openstack/horizon-565b678645-7jbt7" Nov 21 19:19:17 crc kubenswrapper[4701]: I1121 19:19:17.079320 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0-logs\") pod \"horizon-565b678645-7jbt7\" (UID: \"e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0\") " pod="openstack/horizon-565b678645-7jbt7" Nov 21 19:19:17 crc kubenswrapper[4701]: I1121 19:19:17.080230 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0-config-data\") pod \"horizon-565b678645-7jbt7\" (UID: \"e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0\") " pod="openstack/horizon-565b678645-7jbt7" Nov 21 19:19:17 crc kubenswrapper[4701]: I1121 19:19:17.081467 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0-scripts\") pod \"horizon-565b678645-7jbt7\" (UID: \"e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0\") " pod="openstack/horizon-565b678645-7jbt7" Nov 21 19:19:17 crc kubenswrapper[4701]: I1121 19:19:17.081565 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0-horizon-secret-key\") pod \"horizon-565b678645-7jbt7\" (UID: \"e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0\") " pod="openstack/horizon-565b678645-7jbt7" Nov 21 19:19:17 crc kubenswrapper[4701]: I1121 19:19:17.104614 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-bnbp2\" (UniqueName: \"kubernetes.io/projected/e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0-kube-api-access-bnbp2\") pod \"horizon-565b678645-7jbt7\" (UID: \"e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0\") " pod="openstack/horizon-565b678645-7jbt7" Nov 21 19:19:17 crc kubenswrapper[4701]: I1121 19:19:17.181117 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-b8v5x"] Nov 21 19:19:17 crc kubenswrapper[4701]: I1121 19:19:17.240713 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5d76db8c55-8v4cb"] Nov 21 19:19:17 crc kubenswrapper[4701]: I1121 19:19:17.248521 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bd46686f5-2rps9" Nov 21 19:19:17 crc kubenswrapper[4701]: I1121 19:19:17.259713 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-565b678645-7jbt7" Nov 21 19:19:17 crc kubenswrapper[4701]: I1121 19:19:17.713929 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-78dcd8d87c-pf5gj"] Nov 21 19:19:17 crc kubenswrapper[4701]: I1121 19:19:17.857989 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-69chf"] Nov 21 19:19:17 crc kubenswrapper[4701]: I1121 19:19:17.884820 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:19:17 crc kubenswrapper[4701]: I1121 19:19:17.947804 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-78dcd8d87c-pf5gj" event={"ID":"efe8d5fc-c754-4243-bf18-af182791a0a4","Type":"ContainerStarted","Data":"52e07a28d6e8c5ff900637a468d048a807723afffd024f55f5d07eb02cfb23e4"} Nov 21 19:19:17 crc kubenswrapper[4701]: I1121 19:19:17.984965 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d76db8c55-8v4cb" event={"ID":"c95afb30-2ebc-4223-8b41-155df822aac0","Type":"ContainerStarted","Data":"cb7405af580fa7bfb8b562a7ffab35eee24c36c850a7331c4e037b8cf2f20fa6"} Nov 21 19:19:17 crc kubenswrapper[4701]: I1121 19:19:17.985170 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-b8v5x" event={"ID":"6960ad8e-3a39-4a2f-98cf-448bf96e8362","Type":"ContainerStarted","Data":"2322b9f09440efebc1401fd5bb1a025c5d4cddcd740e8eeffe1b5778e870da39"} Nov 21 19:19:18 crc kubenswrapper[4701]: W1121 19:19:18.011640 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod91518a76_c4e2_4f08_831a_aa8fb9d4778c.slice/crio-023813cb056ac2bc047969482284e5c13e94511b623a4e3dd83d98be95f4b5e9 WatchSource:0}: Error finding container 023813cb056ac2bc047969482284e5c13e94511b623a4e3dd83d98be95f4b5e9: Status 404 returned error can't find the container with id 023813cb056ac2bc047969482284e5c13e94511b623a4e3dd83d98be95f4b5e9 Nov 21 19:19:18 crc kubenswrapper[4701]: I1121 19:19:18.021836 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-kfsds"] Nov 21 19:19:18 crc kubenswrapper[4701]: I1121 19:19:18.066696 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-rp9lc"] Nov 21 19:19:18 crc kubenswrapper[4701]: I1121 19:19:18.388477 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-565b678645-7jbt7"] Nov 21 19:19:18 crc kubenswrapper[4701]: I1121 19:19:18.468282 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5bd46686f5-2rps9"] Nov 21 19:19:18 crc 
kubenswrapper[4701]: I1121 19:19:18.979926 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-565b678645-7jbt7" event={"ID":"e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0","Type":"ContainerStarted","Data":"e3e1a9efa342b2284c2b56da11baf05ae8e3fa61227f8b65d7902cb51b359c7e"} Nov 21 19:19:18 crc kubenswrapper[4701]: I1121 19:19:18.991925 4701 generic.go:334] "Generic (PLEG): container finished" podID="c95afb30-2ebc-4223-8b41-155df822aac0" containerID="3123bb41391db144c84673c657f93fd0d4e1583485464ca1f2d7da6157586d37" exitCode=0 Nov 21 19:19:18 crc kubenswrapper[4701]: I1121 19:19:18.992003 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d76db8c55-8v4cb" event={"ID":"c95afb30-2ebc-4223-8b41-155df822aac0","Type":"ContainerDied","Data":"3123bb41391db144c84673c657f93fd0d4e1583485464ca1f2d7da6157586d37"} Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.001844 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-69chf" event={"ID":"d5b93dd5-e6da-4f02-ac4d-b89773e967d3","Type":"ContainerStarted","Data":"f7ea46bbf91e1a6afb865a5af39dca797bc454b97be2a101c07eb378693c5f00"} Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.032021 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-b8v5x" event={"ID":"6960ad8e-3a39-4a2f-98cf-448bf96e8362","Type":"ContainerStarted","Data":"4bd04379b743585bea0354cfac0637eb57de9c98620b1951ec6024eb46b07e09"} Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.059946 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-kfsds" event={"ID":"91518a76-c4e2-4f08-831a-aa8fb9d4778c","Type":"ContainerStarted","Data":"023813cb056ac2bc047969482284e5c13e94511b623a4e3dd83d98be95f4b5e9"} Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.066171 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-b8v5x" podStartSLOduration=3.066132042 podStartE2EDuration="3.066132042s" podCreationTimestamp="2025-11-21 19:19:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:19:19.053874934 +0000 UTC m=+1049.839014961" watchObservedRunningTime="2025-11-21 19:19:19.066132042 +0000 UTC m=+1049.851272079" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.070179 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffb1df83-0092-42e4-885f-e934786a503b","Type":"ContainerStarted","Data":"b80c501315d4edd818b2de94e8f88c34d23c8c03e0ec67294749c3b63c718398"} Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.086647 4701 generic.go:334] "Generic (PLEG): container finished" podID="ae589118-c566-4aea-9183-6c67706aee1e" containerID="4a3c951b2d7618481f56abc802b8e06158e81065deb3977eb8e05e5160205595" exitCode=0 Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.086757 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bd46686f5-2rps9" event={"ID":"ae589118-c566-4aea-9183-6c67706aee1e","Type":"ContainerDied","Data":"4a3c951b2d7618481f56abc802b8e06158e81065deb3977eb8e05e5160205595"} Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.086846 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bd46686f5-2rps9" event={"ID":"ae589118-c566-4aea-9183-6c67706aee1e","Type":"ContainerStarted","Data":"231beea94d8b876f138d80b95a2c1258da0698073ecf48b900c30e687ed3860b"} Nov 21 19:19:19 crc 
kubenswrapper[4701]: I1121 19:19:19.098328 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-rp9lc" event={"ID":"0a93232c-afb8-4ff5-8775-5c3574997149","Type":"ContainerStarted","Data":"760f4582ce73e1baa1ec174ac291649776c24f5d38a5dfc38e15a00e94f24fa3"} Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.149275 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-565b678645-7jbt7"] Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.186772 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-74f5944bd9-kv77f"] Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.188699 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-74f5944bd9-kv77f" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.230938 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.244325 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-74f5944bd9-kv77f"] Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.262925 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a2a434d9-d285-4564-aec6-592d14b749fa-logs\") pod \"horizon-74f5944bd9-kv77f\" (UID: \"a2a434d9-d285-4564-aec6-592d14b749fa\") " pod="openstack/horizon-74f5944bd9-kv77f" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.262984 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-smnds\" (UniqueName: \"kubernetes.io/projected/a2a434d9-d285-4564-aec6-592d14b749fa-kube-api-access-smnds\") pod \"horizon-74f5944bd9-kv77f\" (UID: \"a2a434d9-d285-4564-aec6-592d14b749fa\") " pod="openstack/horizon-74f5944bd9-kv77f" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.263056 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a2a434d9-d285-4564-aec6-592d14b749fa-horizon-secret-key\") pod \"horizon-74f5944bd9-kv77f\" (UID: \"a2a434d9-d285-4564-aec6-592d14b749fa\") " pod="openstack/horizon-74f5944bd9-kv77f" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.263116 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a2a434d9-d285-4564-aec6-592d14b749fa-scripts\") pod \"horizon-74f5944bd9-kv77f\" (UID: \"a2a434d9-d285-4564-aec6-592d14b749fa\") " pod="openstack/horizon-74f5944bd9-kv77f" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.263165 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a2a434d9-d285-4564-aec6-592d14b749fa-config-data\") pod \"horizon-74f5944bd9-kv77f\" (UID: \"a2a434d9-d285-4564-aec6-592d14b749fa\") " pod="openstack/horizon-74f5944bd9-kv77f" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.367411 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-smnds\" (UniqueName: \"kubernetes.io/projected/a2a434d9-d285-4564-aec6-592d14b749fa-kube-api-access-smnds\") pod \"horizon-74f5944bd9-kv77f\" (UID: \"a2a434d9-d285-4564-aec6-592d14b749fa\") " pod="openstack/horizon-74f5944bd9-kv77f" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.367528 4701 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a2a434d9-d285-4564-aec6-592d14b749fa-horizon-secret-key\") pod \"horizon-74f5944bd9-kv77f\" (UID: \"a2a434d9-d285-4564-aec6-592d14b749fa\") " pod="openstack/horizon-74f5944bd9-kv77f" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.367584 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a2a434d9-d285-4564-aec6-592d14b749fa-scripts\") pod \"horizon-74f5944bd9-kv77f\" (UID: \"a2a434d9-d285-4564-aec6-592d14b749fa\") " pod="openstack/horizon-74f5944bd9-kv77f" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.367629 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a2a434d9-d285-4564-aec6-592d14b749fa-config-data\") pod \"horizon-74f5944bd9-kv77f\" (UID: \"a2a434d9-d285-4564-aec6-592d14b749fa\") " pod="openstack/horizon-74f5944bd9-kv77f" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.367669 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a2a434d9-d285-4564-aec6-592d14b749fa-logs\") pod \"horizon-74f5944bd9-kv77f\" (UID: \"a2a434d9-d285-4564-aec6-592d14b749fa\") " pod="openstack/horizon-74f5944bd9-kv77f" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.368102 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a2a434d9-d285-4564-aec6-592d14b749fa-logs\") pod \"horizon-74f5944bd9-kv77f\" (UID: \"a2a434d9-d285-4564-aec6-592d14b749fa\") " pod="openstack/horizon-74f5944bd9-kv77f" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.368773 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a2a434d9-d285-4564-aec6-592d14b749fa-scripts\") pod \"horizon-74f5944bd9-kv77f\" (UID: \"a2a434d9-d285-4564-aec6-592d14b749fa\") " pod="openstack/horizon-74f5944bd9-kv77f" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.369019 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a2a434d9-d285-4564-aec6-592d14b749fa-config-data\") pod \"horizon-74f5944bd9-kv77f\" (UID: \"a2a434d9-d285-4564-aec6-592d14b749fa\") " pod="openstack/horizon-74f5944bd9-kv77f" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.373569 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a2a434d9-d285-4564-aec6-592d14b749fa-horizon-secret-key\") pod \"horizon-74f5944bd9-kv77f\" (UID: \"a2a434d9-d285-4564-aec6-592d14b749fa\") " pod="openstack/horizon-74f5944bd9-kv77f" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.383428 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-smnds\" (UniqueName: \"kubernetes.io/projected/a2a434d9-d285-4564-aec6-592d14b749fa-kube-api-access-smnds\") pod \"horizon-74f5944bd9-kv77f\" (UID: \"a2a434d9-d285-4564-aec6-592d14b749fa\") " pod="openstack/horizon-74f5944bd9-kv77f" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.567286 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-74f5944bd9-kv77f" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.578985 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-sklns"] Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.580451 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-sklns" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.591554 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.591794 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-4fvtn" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.602889 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-sklns"] Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.640607 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d76db8c55-8v4cb" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.673530 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/87272c2c-3166-4a6a-aff9-41278b0b1b51-db-sync-config-data\") pod \"glance-db-sync-sklns\" (UID: \"87272c2c-3166-4a6a-aff9-41278b0b1b51\") " pod="openstack/glance-db-sync-sklns" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.673661 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87272c2c-3166-4a6a-aff9-41278b0b1b51-config-data\") pod \"glance-db-sync-sklns\" (UID: \"87272c2c-3166-4a6a-aff9-41278b0b1b51\") " pod="openstack/glance-db-sync-sklns" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.673728 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kdv6s\" (UniqueName: \"kubernetes.io/projected/87272c2c-3166-4a6a-aff9-41278b0b1b51-kube-api-access-kdv6s\") pod \"glance-db-sync-sklns\" (UID: \"87272c2c-3166-4a6a-aff9-41278b0b1b51\") " pod="openstack/glance-db-sync-sklns" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.673765 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87272c2c-3166-4a6a-aff9-41278b0b1b51-combined-ca-bundle\") pod \"glance-db-sync-sklns\" (UID: \"87272c2c-3166-4a6a-aff9-41278b0b1b51\") " pod="openstack/glance-db-sync-sklns" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.778835 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c95afb30-2ebc-4223-8b41-155df822aac0-ovsdbserver-nb\") pod \"c95afb30-2ebc-4223-8b41-155df822aac0\" (UID: \"c95afb30-2ebc-4223-8b41-155df822aac0\") " Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.779005 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c95afb30-2ebc-4223-8b41-155df822aac0-dns-svc\") pod \"c95afb30-2ebc-4223-8b41-155df822aac0\" (UID: \"c95afb30-2ebc-4223-8b41-155df822aac0\") " Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.779065 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/c95afb30-2ebc-4223-8b41-155df822aac0-config\") pod \"c95afb30-2ebc-4223-8b41-155df822aac0\" (UID: \"c95afb30-2ebc-4223-8b41-155df822aac0\") " Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.779099 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c95afb30-2ebc-4223-8b41-155df822aac0-ovsdbserver-sb\") pod \"c95afb30-2ebc-4223-8b41-155df822aac0\" (UID: \"c95afb30-2ebc-4223-8b41-155df822aac0\") " Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.779223 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mqtcn\" (UniqueName: \"kubernetes.io/projected/c95afb30-2ebc-4223-8b41-155df822aac0-kube-api-access-mqtcn\") pod \"c95afb30-2ebc-4223-8b41-155df822aac0\" (UID: \"c95afb30-2ebc-4223-8b41-155df822aac0\") " Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.779272 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c95afb30-2ebc-4223-8b41-155df822aac0-dns-swift-storage-0\") pod \"c95afb30-2ebc-4223-8b41-155df822aac0\" (UID: \"c95afb30-2ebc-4223-8b41-155df822aac0\") " Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.779527 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87272c2c-3166-4a6a-aff9-41278b0b1b51-config-data\") pod \"glance-db-sync-sklns\" (UID: \"87272c2c-3166-4a6a-aff9-41278b0b1b51\") " pod="openstack/glance-db-sync-sklns" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.779595 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kdv6s\" (UniqueName: \"kubernetes.io/projected/87272c2c-3166-4a6a-aff9-41278b0b1b51-kube-api-access-kdv6s\") pod \"glance-db-sync-sklns\" (UID: \"87272c2c-3166-4a6a-aff9-41278b0b1b51\") " pod="openstack/glance-db-sync-sklns" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.779635 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87272c2c-3166-4a6a-aff9-41278b0b1b51-combined-ca-bundle\") pod \"glance-db-sync-sklns\" (UID: \"87272c2c-3166-4a6a-aff9-41278b0b1b51\") " pod="openstack/glance-db-sync-sklns" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.779679 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/87272c2c-3166-4a6a-aff9-41278b0b1b51-db-sync-config-data\") pod \"glance-db-sync-sklns\" (UID: \"87272c2c-3166-4a6a-aff9-41278b0b1b51\") " pod="openstack/glance-db-sync-sklns" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.797120 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87272c2c-3166-4a6a-aff9-41278b0b1b51-config-data\") pod \"glance-db-sync-sklns\" (UID: \"87272c2c-3166-4a6a-aff9-41278b0b1b51\") " pod="openstack/glance-db-sync-sklns" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.797129 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c95afb30-2ebc-4223-8b41-155df822aac0-kube-api-access-mqtcn" (OuterVolumeSpecName: "kube-api-access-mqtcn") pod "c95afb30-2ebc-4223-8b41-155df822aac0" (UID: "c95afb30-2ebc-4223-8b41-155df822aac0"). InnerVolumeSpecName "kube-api-access-mqtcn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.819706 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/87272c2c-3166-4a6a-aff9-41278b0b1b51-db-sync-config-data\") pod \"glance-db-sync-sklns\" (UID: \"87272c2c-3166-4a6a-aff9-41278b0b1b51\") " pod="openstack/glance-db-sync-sklns" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.831164 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kdv6s\" (UniqueName: \"kubernetes.io/projected/87272c2c-3166-4a6a-aff9-41278b0b1b51-kube-api-access-kdv6s\") pod \"glance-db-sync-sklns\" (UID: \"87272c2c-3166-4a6a-aff9-41278b0b1b51\") " pod="openstack/glance-db-sync-sklns" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.834928 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87272c2c-3166-4a6a-aff9-41278b0b1b51-combined-ca-bundle\") pod \"glance-db-sync-sklns\" (UID: \"87272c2c-3166-4a6a-aff9-41278b0b1b51\") " pod="openstack/glance-db-sync-sklns" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.863669 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c95afb30-2ebc-4223-8b41-155df822aac0-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "c95afb30-2ebc-4223-8b41-155df822aac0" (UID: "c95afb30-2ebc-4223-8b41-155df822aac0"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.866432 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c95afb30-2ebc-4223-8b41-155df822aac0-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "c95afb30-2ebc-4223-8b41-155df822aac0" (UID: "c95afb30-2ebc-4223-8b41-155df822aac0"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.886407 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mqtcn\" (UniqueName: \"kubernetes.io/projected/c95afb30-2ebc-4223-8b41-155df822aac0-kube-api-access-mqtcn\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.886560 4701 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c95afb30-2ebc-4223-8b41-155df822aac0-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.886630 4701 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c95afb30-2ebc-4223-8b41-155df822aac0-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.892001 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c95afb30-2ebc-4223-8b41-155df822aac0-config" (OuterVolumeSpecName: "config") pod "c95afb30-2ebc-4223-8b41-155df822aac0" (UID: "c95afb30-2ebc-4223-8b41-155df822aac0"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.892190 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c95afb30-2ebc-4223-8b41-155df822aac0-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c95afb30-2ebc-4223-8b41-155df822aac0" (UID: "c95afb30-2ebc-4223-8b41-155df822aac0"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.933690 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-sklns" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.944604 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c95afb30-2ebc-4223-8b41-155df822aac0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c95afb30-2ebc-4223-8b41-155df822aac0" (UID: "c95afb30-2ebc-4223-8b41-155df822aac0"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.988514 4701 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c95afb30-2ebc-4223-8b41-155df822aac0-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.988544 4701 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c95afb30-2ebc-4223-8b41-155df822aac0-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:19 crc kubenswrapper[4701]: I1121 19:19:19.988555 4701 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c95afb30-2ebc-4223-8b41-155df822aac0-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:20 crc kubenswrapper[4701]: I1121 19:19:20.081971 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-69n94"] Nov 21 19:19:20 crc kubenswrapper[4701]: E1121 19:19:20.082556 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c95afb30-2ebc-4223-8b41-155df822aac0" containerName="init" Nov 21 19:19:20 crc kubenswrapper[4701]: I1121 19:19:20.082576 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="c95afb30-2ebc-4223-8b41-155df822aac0" containerName="init" Nov 21 19:19:20 crc kubenswrapper[4701]: I1121 19:19:20.082757 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="c95afb30-2ebc-4223-8b41-155df822aac0" containerName="init" Nov 21 19:19:20 crc kubenswrapper[4701]: I1121 19:19:20.083573 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-69n94" Nov 21 19:19:20 crc kubenswrapper[4701]: I1121 19:19:20.096157 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 21 19:19:20 crc kubenswrapper[4701]: I1121 19:19:20.101353 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-69n94"] Nov 21 19:19:20 crc kubenswrapper[4701]: I1121 19:19:20.101473 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-dd6b9" Nov 21 19:19:20 crc kubenswrapper[4701]: I1121 19:19:20.101792 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 21 19:19:20 crc kubenswrapper[4701]: I1121 19:19:20.194163 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1-config\") pod \"neutron-db-sync-69n94\" (UID: \"1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1\") " pod="openstack/neutron-db-sync-69n94" Nov 21 19:19:20 crc kubenswrapper[4701]: I1121 19:19:20.194673 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rht9m\" (UniqueName: \"kubernetes.io/projected/1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1-kube-api-access-rht9m\") pod \"neutron-db-sync-69n94\" (UID: \"1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1\") " pod="openstack/neutron-db-sync-69n94" Nov 21 19:19:20 crc kubenswrapper[4701]: I1121 19:19:20.194784 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1-combined-ca-bundle\") pod \"neutron-db-sync-69n94\" (UID: \"1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1\") " pod="openstack/neutron-db-sync-69n94" Nov 21 19:19:20 crc kubenswrapper[4701]: I1121 19:19:20.224055 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d76db8c55-8v4cb" event={"ID":"c95afb30-2ebc-4223-8b41-155df822aac0","Type":"ContainerDied","Data":"cb7405af580fa7bfb8b562a7ffab35eee24c36c850a7331c4e037b8cf2f20fa6"} Nov 21 19:19:20 crc kubenswrapper[4701]: I1121 19:19:20.224125 4701 scope.go:117] "RemoveContainer" containerID="3123bb41391db144c84673c657f93fd0d4e1583485464ca1f2d7da6157586d37" Nov 21 19:19:20 crc kubenswrapper[4701]: I1121 19:19:20.224317 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5d76db8c55-8v4cb" Nov 21 19:19:21 crc kubenswrapper[4701]: I1121 19:19:20.229701 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bd46686f5-2rps9" event={"ID":"ae589118-c566-4aea-9183-6c67706aee1e","Type":"ContainerStarted","Data":"29f2660b599054b49bbcdedef9a95527e7a187beb99e2ccada4764be2a7a69c0"} Nov 21 19:19:21 crc kubenswrapper[4701]: I1121 19:19:20.302658 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5bd46686f5-2rps9" podStartSLOduration=4.30263086 podStartE2EDuration="4.30263086s" podCreationTimestamp="2025-11-21 19:19:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:19:20.273598053 +0000 UTC m=+1051.058738080" watchObservedRunningTime="2025-11-21 19:19:20.30263086 +0000 UTC m=+1051.087770887" Nov 21 19:19:21 crc kubenswrapper[4701]: I1121 19:19:20.303764 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-74f5944bd9-kv77f"] Nov 21 19:19:21 crc kubenswrapper[4701]: I1121 19:19:20.347697 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1-combined-ca-bundle\") pod \"neutron-db-sync-69n94\" (UID: \"1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1\") " pod="openstack/neutron-db-sync-69n94" Nov 21 19:19:21 crc kubenswrapper[4701]: I1121 19:19:20.347933 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1-config\") pod \"neutron-db-sync-69n94\" (UID: \"1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1\") " pod="openstack/neutron-db-sync-69n94" Nov 21 19:19:21 crc kubenswrapper[4701]: I1121 19:19:20.348100 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rht9m\" (UniqueName: \"kubernetes.io/projected/1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1-kube-api-access-rht9m\") pod \"neutron-db-sync-69n94\" (UID: \"1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1\") " pod="openstack/neutron-db-sync-69n94" Nov 21 19:19:21 crc kubenswrapper[4701]: I1121 19:19:20.361717 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1-combined-ca-bundle\") pod \"neutron-db-sync-69n94\" (UID: \"1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1\") " pod="openstack/neutron-db-sync-69n94" Nov 21 19:19:21 crc kubenswrapper[4701]: I1121 19:19:20.362910 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1-config\") pod \"neutron-db-sync-69n94\" (UID: \"1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1\") " pod="openstack/neutron-db-sync-69n94" Nov 21 19:19:21 crc kubenswrapper[4701]: I1121 19:19:20.381888 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rht9m\" (UniqueName: \"kubernetes.io/projected/1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1-kube-api-access-rht9m\") pod \"neutron-db-sync-69n94\" (UID: \"1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1\") " pod="openstack/neutron-db-sync-69n94" Nov 21 19:19:21 crc kubenswrapper[4701]: I1121 19:19:20.398795 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5d76db8c55-8v4cb"] Nov 21 19:19:21 crc kubenswrapper[4701]: I1121 
19:19:20.410511 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5d76db8c55-8v4cb"] Nov 21 19:19:21 crc kubenswrapper[4701]: I1121 19:19:20.439927 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-69n94" Nov 21 19:19:21 crc kubenswrapper[4701]: I1121 19:19:21.244301 4701 generic.go:334] "Generic (PLEG): container finished" podID="98d3b473-8ffd-47bb-a010-65c275226084" containerID="86fa1db4927122530f5aa1a3ab987e220b706eddd564ec4982f87982eb7f40dc" exitCode=0 Nov 21 19:19:21 crc kubenswrapper[4701]: I1121 19:19:21.244382 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-xgwkm" event={"ID":"98d3b473-8ffd-47bb-a010-65c275226084","Type":"ContainerDied","Data":"86fa1db4927122530f5aa1a3ab987e220b706eddd564ec4982f87982eb7f40dc"} Nov 21 19:19:21 crc kubenswrapper[4701]: I1121 19:19:21.246918 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-74f5944bd9-kv77f" event={"ID":"a2a434d9-d285-4564-aec6-592d14b749fa","Type":"ContainerStarted","Data":"eab2f7a779ec53f6f32fdea5bde0091f39b98ab2b297986b0aa3cb521b667d25"} Nov 21 19:19:21 crc kubenswrapper[4701]: I1121 19:19:21.257619 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5bd46686f5-2rps9" Nov 21 19:19:21 crc kubenswrapper[4701]: I1121 19:19:21.349442 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-sklns"] Nov 21 19:19:21 crc kubenswrapper[4701]: I1121 19:19:21.631005 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-69n94"] Nov 21 19:19:21 crc kubenswrapper[4701]: W1121 19:19:21.692081 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1a8ca8ef_555a_4ef6_a09e_1ff0e9b841f1.slice/crio-531af818dc604bf092c094c9fc5ed442cb34be6ef389f942eec249994bf328c9 WatchSource:0}: Error finding container 531af818dc604bf092c094c9fc5ed442cb34be6ef389f942eec249994bf328c9: Status 404 returned error can't find the container with id 531af818dc604bf092c094c9fc5ed442cb34be6ef389f942eec249994bf328c9 Nov 21 19:19:21 crc kubenswrapper[4701]: I1121 19:19:21.967109 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c95afb30-2ebc-4223-8b41-155df822aac0" path="/var/lib/kubelet/pods/c95afb30-2ebc-4223-8b41-155df822aac0/volumes" Nov 21 19:19:22 crc kubenswrapper[4701]: I1121 19:19:22.285654 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-69n94" event={"ID":"1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1","Type":"ContainerStarted","Data":"531af818dc604bf092c094c9fc5ed442cb34be6ef389f942eec249994bf328c9"} Nov 21 19:19:22 crc kubenswrapper[4701]: I1121 19:19:22.288326 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-sklns" event={"ID":"87272c2c-3166-4a6a-aff9-41278b0b1b51","Type":"ContainerStarted","Data":"e418e9aa2674d7ff4bf099fb85d5f3b49d0f5b2120d4c27aaaa5a8cacd7cee7c"} Nov 21 19:19:24 crc kubenswrapper[4701]: I1121 19:19:24.327537 4701 generic.go:334] "Generic (PLEG): container finished" podID="6960ad8e-3a39-4a2f-98cf-448bf96e8362" containerID="4bd04379b743585bea0354cfac0637eb57de9c98620b1951ec6024eb46b07e09" exitCode=0 Nov 21 19:19:24 crc kubenswrapper[4701]: I1121 19:19:24.327627 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-b8v5x" 
event={"ID":"6960ad8e-3a39-4a2f-98cf-448bf96e8362","Type":"ContainerDied","Data":"4bd04379b743585bea0354cfac0637eb57de9c98620b1951ec6024eb46b07e09"} Nov 21 19:19:24 crc kubenswrapper[4701]: I1121 19:19:24.704940 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-sync-xgwkm" Nov 21 19:19:24 crc kubenswrapper[4701]: I1121 19:19:24.764022 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98d3b473-8ffd-47bb-a010-65c275226084-config-data\") pod \"98d3b473-8ffd-47bb-a010-65c275226084\" (UID: \"98d3b473-8ffd-47bb-a010-65c275226084\") " Nov 21 19:19:24 crc kubenswrapper[4701]: I1121 19:19:24.764367 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hkrsj\" (UniqueName: \"kubernetes.io/projected/98d3b473-8ffd-47bb-a010-65c275226084-kube-api-access-hkrsj\") pod \"98d3b473-8ffd-47bb-a010-65c275226084\" (UID: \"98d3b473-8ffd-47bb-a010-65c275226084\") " Nov 21 19:19:24 crc kubenswrapper[4701]: I1121 19:19:24.764416 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98d3b473-8ffd-47bb-a010-65c275226084-combined-ca-bundle\") pod \"98d3b473-8ffd-47bb-a010-65c275226084\" (UID: \"98d3b473-8ffd-47bb-a010-65c275226084\") " Nov 21 19:19:24 crc kubenswrapper[4701]: I1121 19:19:24.764516 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/98d3b473-8ffd-47bb-a010-65c275226084-db-sync-config-data\") pod \"98d3b473-8ffd-47bb-a010-65c275226084\" (UID: \"98d3b473-8ffd-47bb-a010-65c275226084\") " Nov 21 19:19:24 crc kubenswrapper[4701]: I1121 19:19:24.772330 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/98d3b473-8ffd-47bb-a010-65c275226084-kube-api-access-hkrsj" (OuterVolumeSpecName: "kube-api-access-hkrsj") pod "98d3b473-8ffd-47bb-a010-65c275226084" (UID: "98d3b473-8ffd-47bb-a010-65c275226084"). InnerVolumeSpecName "kube-api-access-hkrsj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:19:24 crc kubenswrapper[4701]: I1121 19:19:24.802399 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98d3b473-8ffd-47bb-a010-65c275226084-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "98d3b473-8ffd-47bb-a010-65c275226084" (UID: "98d3b473-8ffd-47bb-a010-65c275226084"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:19:24 crc kubenswrapper[4701]: I1121 19:19:24.840827 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98d3b473-8ffd-47bb-a010-65c275226084-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "98d3b473-8ffd-47bb-a010-65c275226084" (UID: "98d3b473-8ffd-47bb-a010-65c275226084"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:19:24 crc kubenswrapper[4701]: I1121 19:19:24.864306 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98d3b473-8ffd-47bb-a010-65c275226084-config-data" (OuterVolumeSpecName: "config-data") pod "98d3b473-8ffd-47bb-a010-65c275226084" (UID: "98d3b473-8ffd-47bb-a010-65c275226084"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:19:24 crc kubenswrapper[4701]: I1121 19:19:24.867778 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hkrsj\" (UniqueName: \"kubernetes.io/projected/98d3b473-8ffd-47bb-a010-65c275226084-kube-api-access-hkrsj\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:24 crc kubenswrapper[4701]: I1121 19:19:24.867834 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98d3b473-8ffd-47bb-a010-65c275226084-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:24 crc kubenswrapper[4701]: I1121 19:19:24.867844 4701 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/98d3b473-8ffd-47bb-a010-65c275226084-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:24 crc kubenswrapper[4701]: I1121 19:19:24.867856 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98d3b473-8ffd-47bb-a010-65c275226084-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.285895 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-78dcd8d87c-pf5gj"] Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.323616 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-869574dbc6-l96tx"] Nov 21 19:19:25 crc kubenswrapper[4701]: E1121 19:19:25.325240 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98d3b473-8ffd-47bb-a010-65c275226084" containerName="watcher-db-sync" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.325262 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="98d3b473-8ffd-47bb-a010-65c275226084" containerName="watcher-db-sync" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.325492 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="98d3b473-8ffd-47bb-a010-65c275226084" containerName="watcher-db-sync" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.326492 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-869574dbc6-l96tx" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.329583 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.343492 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-869574dbc6-l96tx"] Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.379872 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-db-sync-xgwkm" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.379869 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-xgwkm" event={"ID":"98d3b473-8ffd-47bb-a010-65c275226084","Type":"ContainerDied","Data":"9a127e7a93f8e1e7122bf7643ebb09f7eb212be5af4ab7d12d346c01dee485e6"} Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.379944 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9a127e7a93f8e1e7122bf7643ebb09f7eb212be5af4ab7d12d346c01dee485e6" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.385534 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c543587-173c-4fb2-b730-72b848f845d6-combined-ca-bundle\") pod \"horizon-869574dbc6-l96tx\" (UID: \"1c543587-173c-4fb2-b730-72b848f845d6\") " pod="openstack/horizon-869574dbc6-l96tx" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.385585 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1c543587-173c-4fb2-b730-72b848f845d6-config-data\") pod \"horizon-869574dbc6-l96tx\" (UID: \"1c543587-173c-4fb2-b730-72b848f845d6\") " pod="openstack/horizon-869574dbc6-l96tx" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.385620 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1c543587-173c-4fb2-b730-72b848f845d6-logs\") pod \"horizon-869574dbc6-l96tx\" (UID: \"1c543587-173c-4fb2-b730-72b848f845d6\") " pod="openstack/horizon-869574dbc6-l96tx" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.385681 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rn697\" (UniqueName: \"kubernetes.io/projected/1c543587-173c-4fb2-b730-72b848f845d6-kube-api-access-rn697\") pod \"horizon-869574dbc6-l96tx\" (UID: \"1c543587-173c-4fb2-b730-72b848f845d6\") " pod="openstack/horizon-869574dbc6-l96tx" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.385819 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1c543587-173c-4fb2-b730-72b848f845d6-scripts\") pod \"horizon-869574dbc6-l96tx\" (UID: \"1c543587-173c-4fb2-b730-72b848f845d6\") " pod="openstack/horizon-869574dbc6-l96tx" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.385845 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/1c543587-173c-4fb2-b730-72b848f845d6-horizon-secret-key\") pod \"horizon-869574dbc6-l96tx\" (UID: \"1c543587-173c-4fb2-b730-72b848f845d6\") " pod="openstack/horizon-869574dbc6-l96tx" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.385873 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c543587-173c-4fb2-b730-72b848f845d6-horizon-tls-certs\") pod \"horizon-869574dbc6-l96tx\" (UID: \"1c543587-173c-4fb2-b730-72b848f845d6\") " pod="openstack/horizon-869574dbc6-l96tx" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.405728 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-69n94" 
event={"ID":"1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1","Type":"ContainerStarted","Data":"52af84ab1204a8fa9b18e6eae42ba1f75dfc50729fcb446205319c006baf36a5"} Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.411919 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-74f5944bd9-kv77f"] Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.435656 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-6c68b8ff68-tfcgs"] Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.440179 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6c68b8ff68-tfcgs" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.453257 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-69n94" podStartSLOduration=5.453230167 podStartE2EDuration="5.453230167s" podCreationTimestamp="2025-11-21 19:19:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:19:25.44098422 +0000 UTC m=+1056.226124247" watchObservedRunningTime="2025-11-21 19:19:25.453230167 +0000 UTC m=+1056.238370194" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.482497 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6c68b8ff68-tfcgs"] Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.487824 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1c543587-173c-4fb2-b730-72b848f845d6-scripts\") pod \"horizon-869574dbc6-l96tx\" (UID: \"1c543587-173c-4fb2-b730-72b848f845d6\") " pod="openstack/horizon-869574dbc6-l96tx" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.487884 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/1c543587-173c-4fb2-b730-72b848f845d6-horizon-secret-key\") pod \"horizon-869574dbc6-l96tx\" (UID: \"1c543587-173c-4fb2-b730-72b848f845d6\") " pod="openstack/horizon-869574dbc6-l96tx" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.487914 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c543587-173c-4fb2-b730-72b848f845d6-horizon-tls-certs\") pod \"horizon-869574dbc6-l96tx\" (UID: \"1c543587-173c-4fb2-b730-72b848f845d6\") " pod="openstack/horizon-869574dbc6-l96tx" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.487940 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c543587-173c-4fb2-b730-72b848f845d6-combined-ca-bundle\") pod \"horizon-869574dbc6-l96tx\" (UID: \"1c543587-173c-4fb2-b730-72b848f845d6\") " pod="openstack/horizon-869574dbc6-l96tx" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.487958 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1c543587-173c-4fb2-b730-72b848f845d6-config-data\") pod \"horizon-869574dbc6-l96tx\" (UID: \"1c543587-173c-4fb2-b730-72b848f845d6\") " pod="openstack/horizon-869574dbc6-l96tx" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.487978 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1c543587-173c-4fb2-b730-72b848f845d6-logs\") pod \"horizon-869574dbc6-l96tx\" (UID: 
\"1c543587-173c-4fb2-b730-72b848f845d6\") " pod="openstack/horizon-869574dbc6-l96tx" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.488018 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/7d8b1846-dcd5-49b4-8eb2-74b0462538e1-horizon-tls-certs\") pod \"horizon-6c68b8ff68-tfcgs\" (UID: \"7d8b1846-dcd5-49b4-8eb2-74b0462538e1\") " pod="openstack/horizon-6c68b8ff68-tfcgs" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.488057 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7d8b1846-dcd5-49b4-8eb2-74b0462538e1-logs\") pod \"horizon-6c68b8ff68-tfcgs\" (UID: \"7d8b1846-dcd5-49b4-8eb2-74b0462538e1\") " pod="openstack/horizon-6c68b8ff68-tfcgs" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.488079 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rn697\" (UniqueName: \"kubernetes.io/projected/1c543587-173c-4fb2-b730-72b848f845d6-kube-api-access-rn697\") pod \"horizon-869574dbc6-l96tx\" (UID: \"1c543587-173c-4fb2-b730-72b848f845d6\") " pod="openstack/horizon-869574dbc6-l96tx" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.488115 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7d8b1846-dcd5-49b4-8eb2-74b0462538e1-horizon-secret-key\") pod \"horizon-6c68b8ff68-tfcgs\" (UID: \"7d8b1846-dcd5-49b4-8eb2-74b0462538e1\") " pod="openstack/horizon-6c68b8ff68-tfcgs" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.488135 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d8b1846-dcd5-49b4-8eb2-74b0462538e1-combined-ca-bundle\") pod \"horizon-6c68b8ff68-tfcgs\" (UID: \"7d8b1846-dcd5-49b4-8eb2-74b0462538e1\") " pod="openstack/horizon-6c68b8ff68-tfcgs" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.488165 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g4w6w\" (UniqueName: \"kubernetes.io/projected/7d8b1846-dcd5-49b4-8eb2-74b0462538e1-kube-api-access-g4w6w\") pod \"horizon-6c68b8ff68-tfcgs\" (UID: \"7d8b1846-dcd5-49b4-8eb2-74b0462538e1\") " pod="openstack/horizon-6c68b8ff68-tfcgs" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.488187 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7d8b1846-dcd5-49b4-8eb2-74b0462538e1-config-data\") pod \"horizon-6c68b8ff68-tfcgs\" (UID: \"7d8b1846-dcd5-49b4-8eb2-74b0462538e1\") " pod="openstack/horizon-6c68b8ff68-tfcgs" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.488236 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7d8b1846-dcd5-49b4-8eb2-74b0462538e1-scripts\") pod \"horizon-6c68b8ff68-tfcgs\" (UID: \"7d8b1846-dcd5-49b4-8eb2-74b0462538e1\") " pod="openstack/horizon-6c68b8ff68-tfcgs" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.489149 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1c543587-173c-4fb2-b730-72b848f845d6-scripts\") pod \"horizon-869574dbc6-l96tx\" (UID: 
\"1c543587-173c-4fb2-b730-72b848f845d6\") " pod="openstack/horizon-869574dbc6-l96tx" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.492374 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1c543587-173c-4fb2-b730-72b848f845d6-logs\") pod \"horizon-869574dbc6-l96tx\" (UID: \"1c543587-173c-4fb2-b730-72b848f845d6\") " pod="openstack/horizon-869574dbc6-l96tx" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.493426 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1c543587-173c-4fb2-b730-72b848f845d6-config-data\") pod \"horizon-869574dbc6-l96tx\" (UID: \"1c543587-173c-4fb2-b730-72b848f845d6\") " pod="openstack/horizon-869574dbc6-l96tx" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.496718 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c543587-173c-4fb2-b730-72b848f845d6-horizon-tls-certs\") pod \"horizon-869574dbc6-l96tx\" (UID: \"1c543587-173c-4fb2-b730-72b848f845d6\") " pod="openstack/horizon-869574dbc6-l96tx" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.496773 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/1c543587-173c-4fb2-b730-72b848f845d6-horizon-secret-key\") pod \"horizon-869574dbc6-l96tx\" (UID: \"1c543587-173c-4fb2-b730-72b848f845d6\") " pod="openstack/horizon-869574dbc6-l96tx" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.497752 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c543587-173c-4fb2-b730-72b848f845d6-combined-ca-bundle\") pod \"horizon-869574dbc6-l96tx\" (UID: \"1c543587-173c-4fb2-b730-72b848f845d6\") " pod="openstack/horizon-869574dbc6-l96tx" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.514965 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rn697\" (UniqueName: \"kubernetes.io/projected/1c543587-173c-4fb2-b730-72b848f845d6-kube-api-access-rn697\") pod \"horizon-869574dbc6-l96tx\" (UID: \"1c543587-173c-4fb2-b730-72b848f845d6\") " pod="openstack/horizon-869574dbc6-l96tx" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.590075 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g4w6w\" (UniqueName: \"kubernetes.io/projected/7d8b1846-dcd5-49b4-8eb2-74b0462538e1-kube-api-access-g4w6w\") pod \"horizon-6c68b8ff68-tfcgs\" (UID: \"7d8b1846-dcd5-49b4-8eb2-74b0462538e1\") " pod="openstack/horizon-6c68b8ff68-tfcgs" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.590172 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7d8b1846-dcd5-49b4-8eb2-74b0462538e1-config-data\") pod \"horizon-6c68b8ff68-tfcgs\" (UID: \"7d8b1846-dcd5-49b4-8eb2-74b0462538e1\") " pod="openstack/horizon-6c68b8ff68-tfcgs" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.590237 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7d8b1846-dcd5-49b4-8eb2-74b0462538e1-scripts\") pod \"horizon-6c68b8ff68-tfcgs\" (UID: \"7d8b1846-dcd5-49b4-8eb2-74b0462538e1\") " pod="openstack/horizon-6c68b8ff68-tfcgs" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.590382 4701 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/7d8b1846-dcd5-49b4-8eb2-74b0462538e1-horizon-tls-certs\") pod \"horizon-6c68b8ff68-tfcgs\" (UID: \"7d8b1846-dcd5-49b4-8eb2-74b0462538e1\") " pod="openstack/horizon-6c68b8ff68-tfcgs" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.590422 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7d8b1846-dcd5-49b4-8eb2-74b0462538e1-logs\") pod \"horizon-6c68b8ff68-tfcgs\" (UID: \"7d8b1846-dcd5-49b4-8eb2-74b0462538e1\") " pod="openstack/horizon-6c68b8ff68-tfcgs" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.590464 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7d8b1846-dcd5-49b4-8eb2-74b0462538e1-horizon-secret-key\") pod \"horizon-6c68b8ff68-tfcgs\" (UID: \"7d8b1846-dcd5-49b4-8eb2-74b0462538e1\") " pod="openstack/horizon-6c68b8ff68-tfcgs" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.590489 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d8b1846-dcd5-49b4-8eb2-74b0462538e1-combined-ca-bundle\") pod \"horizon-6c68b8ff68-tfcgs\" (UID: \"7d8b1846-dcd5-49b4-8eb2-74b0462538e1\") " pod="openstack/horizon-6c68b8ff68-tfcgs" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.592229 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7d8b1846-dcd5-49b4-8eb2-74b0462538e1-scripts\") pod \"horizon-6c68b8ff68-tfcgs\" (UID: \"7d8b1846-dcd5-49b4-8eb2-74b0462538e1\") " pod="openstack/horizon-6c68b8ff68-tfcgs" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.592733 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7d8b1846-dcd5-49b4-8eb2-74b0462538e1-config-data\") pod \"horizon-6c68b8ff68-tfcgs\" (UID: \"7d8b1846-dcd5-49b4-8eb2-74b0462538e1\") " pod="openstack/horizon-6c68b8ff68-tfcgs" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.593045 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7d8b1846-dcd5-49b4-8eb2-74b0462538e1-logs\") pod \"horizon-6c68b8ff68-tfcgs\" (UID: \"7d8b1846-dcd5-49b4-8eb2-74b0462538e1\") " pod="openstack/horizon-6c68b8ff68-tfcgs" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.597110 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/7d8b1846-dcd5-49b4-8eb2-74b0462538e1-horizon-tls-certs\") pod \"horizon-6c68b8ff68-tfcgs\" (UID: \"7d8b1846-dcd5-49b4-8eb2-74b0462538e1\") " pod="openstack/horizon-6c68b8ff68-tfcgs" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.598459 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d8b1846-dcd5-49b4-8eb2-74b0462538e1-combined-ca-bundle\") pod \"horizon-6c68b8ff68-tfcgs\" (UID: \"7d8b1846-dcd5-49b4-8eb2-74b0462538e1\") " pod="openstack/horizon-6c68b8ff68-tfcgs" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.599803 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7d8b1846-dcd5-49b4-8eb2-74b0462538e1-horizon-secret-key\") pod \"horizon-6c68b8ff68-tfcgs\" (UID: 
\"7d8b1846-dcd5-49b4-8eb2-74b0462538e1\") " pod="openstack/horizon-6c68b8ff68-tfcgs" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.608559 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g4w6w\" (UniqueName: \"kubernetes.io/projected/7d8b1846-dcd5-49b4-8eb2-74b0462538e1-kube-api-access-g4w6w\") pod \"horizon-6c68b8ff68-tfcgs\" (UID: \"7d8b1846-dcd5-49b4-8eb2-74b0462538e1\") " pod="openstack/horizon-6c68b8ff68-tfcgs" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.662596 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-869574dbc6-l96tx" Nov 21 19:19:25 crc kubenswrapper[4701]: I1121 19:19:25.764702 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6c68b8ff68-tfcgs" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.086823 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-api-0"] Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.090162 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.098312 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-api-config-data" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.098902 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-watcher-dockercfg-9p59w" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.110087 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.134322 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.136070 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.146532 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.156421 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-decision-engine-config-data" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.189537 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-applier-0"] Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.191133 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-applier-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.194789 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-applier-config-data" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.205791 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a\") " pod="openstack/watcher-api-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.205888 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a7a5be4-96a4-4574-9839-2d0576595305-config-data\") pod \"watcher-decision-engine-0\" (UID: \"8a7a5be4-96a4-4574-9839-2d0576595305\") " pod="openstack/watcher-decision-engine-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.205922 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8a7a5be4-96a4-4574-9839-2d0576595305-logs\") pod \"watcher-decision-engine-0\" (UID: \"8a7a5be4-96a4-4574-9839-2d0576595305\") " pod="openstack/watcher-decision-engine-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.205944 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a\") " pod="openstack/watcher-api-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.205965 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a-logs\") pod \"watcher-api-0\" (UID: \"bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a\") " pod="openstack/watcher-api-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.205985 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2bbe25c3-cbc9-45d6-aabe-a9b8e69d044f-logs\") pod \"watcher-applier-0\" (UID: \"2bbe25c3-cbc9-45d6-aabe-a9b8e69d044f\") " pod="openstack/watcher-applier-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.206010 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t845q\" (UniqueName: \"kubernetes.io/projected/8a7a5be4-96a4-4574-9839-2d0576595305-kube-api-access-t845q\") pod \"watcher-decision-engine-0\" (UID: \"8a7a5be4-96a4-4574-9839-2d0576595305\") " pod="openstack/watcher-decision-engine-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.206242 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7skcr\" (UniqueName: \"kubernetes.io/projected/2bbe25c3-cbc9-45d6-aabe-a9b8e69d044f-kube-api-access-7skcr\") pod \"watcher-applier-0\" (UID: \"2bbe25c3-cbc9-45d6-aabe-a9b8e69d044f\") " pod="openstack/watcher-applier-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.206504 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/2bbe25c3-cbc9-45d6-aabe-a9b8e69d044f-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"2bbe25c3-cbc9-45d6-aabe-a9b8e69d044f\") " pod="openstack/watcher-applier-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.207268 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7f9k4\" (UniqueName: \"kubernetes.io/projected/bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a-kube-api-access-7f9k4\") pod \"watcher-api-0\" (UID: \"bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a\") " pod="openstack/watcher-api-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.207898 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/8a7a5be4-96a4-4574-9839-2d0576595305-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"8a7a5be4-96a4-4574-9839-2d0576595305\") " pod="openstack/watcher-decision-engine-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.207981 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bbe25c3-cbc9-45d6-aabe-a9b8e69d044f-config-data\") pod \"watcher-applier-0\" (UID: \"2bbe25c3-cbc9-45d6-aabe-a9b8e69d044f\") " pod="openstack/watcher-applier-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.208061 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a-config-data\") pod \"watcher-api-0\" (UID: \"bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a\") " pod="openstack/watcher-api-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.208113 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a7a5be4-96a4-4574-9839-2d0576595305-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"8a7a5be4-96a4-4574-9839-2d0576595305\") " pod="openstack/watcher-decision-engine-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.224326 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-applier-0"] Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.310509 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a7a5be4-96a4-4574-9839-2d0576595305-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"8a7a5be4-96a4-4574-9839-2d0576595305\") " pod="openstack/watcher-decision-engine-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.310614 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a\") " pod="openstack/watcher-api-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.310688 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a7a5be4-96a4-4574-9839-2d0576595305-config-data\") pod \"watcher-decision-engine-0\" (UID: \"8a7a5be4-96a4-4574-9839-2d0576595305\") " pod="openstack/watcher-decision-engine-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.310717 4701 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8a7a5be4-96a4-4574-9839-2d0576595305-logs\") pod \"watcher-decision-engine-0\" (UID: \"8a7a5be4-96a4-4574-9839-2d0576595305\") " pod="openstack/watcher-decision-engine-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.310745 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a\") " pod="openstack/watcher-api-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.310770 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a-logs\") pod \"watcher-api-0\" (UID: \"bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a\") " pod="openstack/watcher-api-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.310813 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2bbe25c3-cbc9-45d6-aabe-a9b8e69d044f-logs\") pod \"watcher-applier-0\" (UID: \"2bbe25c3-cbc9-45d6-aabe-a9b8e69d044f\") " pod="openstack/watcher-applier-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.310843 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t845q\" (UniqueName: \"kubernetes.io/projected/8a7a5be4-96a4-4574-9839-2d0576595305-kube-api-access-t845q\") pod \"watcher-decision-engine-0\" (UID: \"8a7a5be4-96a4-4574-9839-2d0576595305\") " pod="openstack/watcher-decision-engine-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.310876 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7skcr\" (UniqueName: \"kubernetes.io/projected/2bbe25c3-cbc9-45d6-aabe-a9b8e69d044f-kube-api-access-7skcr\") pod \"watcher-applier-0\" (UID: \"2bbe25c3-cbc9-45d6-aabe-a9b8e69d044f\") " pod="openstack/watcher-applier-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.310919 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bbe25c3-cbc9-45d6-aabe-a9b8e69d044f-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"2bbe25c3-cbc9-45d6-aabe-a9b8e69d044f\") " pod="openstack/watcher-applier-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.310958 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7f9k4\" (UniqueName: \"kubernetes.io/projected/bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a-kube-api-access-7f9k4\") pod \"watcher-api-0\" (UID: \"bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a\") " pod="openstack/watcher-api-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.311007 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/8a7a5be4-96a4-4574-9839-2d0576595305-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"8a7a5be4-96a4-4574-9839-2d0576595305\") " pod="openstack/watcher-decision-engine-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.311031 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bbe25c3-cbc9-45d6-aabe-a9b8e69d044f-config-data\") pod \"watcher-applier-0\" (UID: \"2bbe25c3-cbc9-45d6-aabe-a9b8e69d044f\") " 
pod="openstack/watcher-applier-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.311074 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a-config-data\") pod \"watcher-api-0\" (UID: \"bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a\") " pod="openstack/watcher-api-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.311685 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8a7a5be4-96a4-4574-9839-2d0576595305-logs\") pod \"watcher-decision-engine-0\" (UID: \"8a7a5be4-96a4-4574-9839-2d0576595305\") " pod="openstack/watcher-decision-engine-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.313897 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2bbe25c3-cbc9-45d6-aabe-a9b8e69d044f-logs\") pod \"watcher-applier-0\" (UID: \"2bbe25c3-cbc9-45d6-aabe-a9b8e69d044f\") " pod="openstack/watcher-applier-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.314215 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a-logs\") pod \"watcher-api-0\" (UID: \"bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a\") " pod="openstack/watcher-api-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.320245 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a-config-data\") pod \"watcher-api-0\" (UID: \"bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a\") " pod="openstack/watcher-api-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.321398 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/8a7a5be4-96a4-4574-9839-2d0576595305-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"8a7a5be4-96a4-4574-9839-2d0576595305\") " pod="openstack/watcher-decision-engine-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.321591 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a7a5be4-96a4-4574-9839-2d0576595305-config-data\") pod \"watcher-decision-engine-0\" (UID: \"8a7a5be4-96a4-4574-9839-2d0576595305\") " pod="openstack/watcher-decision-engine-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.321685 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a\") " pod="openstack/watcher-api-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.323172 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bbe25c3-cbc9-45d6-aabe-a9b8e69d044f-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"2bbe25c3-cbc9-45d6-aabe-a9b8e69d044f\") " pod="openstack/watcher-applier-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.323899 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bbe25c3-cbc9-45d6-aabe-a9b8e69d044f-config-data\") pod \"watcher-applier-0\" (UID: \"2bbe25c3-cbc9-45d6-aabe-a9b8e69d044f\") " 
pod="openstack/watcher-applier-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.331013 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a7a5be4-96a4-4574-9839-2d0576595305-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"8a7a5be4-96a4-4574-9839-2d0576595305\") " pod="openstack/watcher-decision-engine-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.339093 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a\") " pod="openstack/watcher-api-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.340912 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t845q\" (UniqueName: \"kubernetes.io/projected/8a7a5be4-96a4-4574-9839-2d0576595305-kube-api-access-t845q\") pod \"watcher-decision-engine-0\" (UID: \"8a7a5be4-96a4-4574-9839-2d0576595305\") " pod="openstack/watcher-decision-engine-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.341326 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7skcr\" (UniqueName: \"kubernetes.io/projected/2bbe25c3-cbc9-45d6-aabe-a9b8e69d044f-kube-api-access-7skcr\") pod \"watcher-applier-0\" (UID: \"2bbe25c3-cbc9-45d6-aabe-a9b8e69d044f\") " pod="openstack/watcher-applier-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.348496 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7f9k4\" (UniqueName: \"kubernetes.io/projected/bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a-kube-api-access-7f9k4\") pod \"watcher-api-0\" (UID: \"bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a\") " pod="openstack/watcher-api-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.426775 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.481696 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0" Nov 21 19:19:26 crc kubenswrapper[4701]: I1121 19:19:26.508344 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-applier-0" Nov 21 19:19:27 crc kubenswrapper[4701]: I1121 19:19:27.251043 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5bd46686f5-2rps9" Nov 21 19:19:27 crc kubenswrapper[4701]: I1121 19:19:27.331251 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-85789dd45c-hpddc"] Nov 21 19:19:27 crc kubenswrapper[4701]: I1121 19:19:27.331526 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-85789dd45c-hpddc" podUID="e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef" containerName="dnsmasq-dns" containerID="cri-o://218230f11f6a75f5f8c3f8e1683007a2bc0d9c050f4727d2a8f170029f3651d3" gracePeriod=10 Nov 21 19:19:28 crc kubenswrapper[4701]: I1121 19:19:28.454195 4701 generic.go:334] "Generic (PLEG): container finished" podID="e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef" containerID="218230f11f6a75f5f8c3f8e1683007a2bc0d9c050f4727d2a8f170029f3651d3" exitCode=0 Nov 21 19:19:28 crc kubenswrapper[4701]: I1121 19:19:28.454278 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85789dd45c-hpddc" event={"ID":"e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef","Type":"ContainerDied","Data":"218230f11f6a75f5f8c3f8e1683007a2bc0d9c050f4727d2a8f170029f3651d3"} Nov 21 19:19:29 crc kubenswrapper[4701]: I1121 19:19:29.972470 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-85789dd45c-hpddc" podUID="e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.134:5353: connect: connection refused" Nov 21 19:19:34 crc kubenswrapper[4701]: I1121 19:19:34.973096 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-85789dd45c-hpddc" podUID="e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.134:5353: connect: connection refused" Nov 21 19:19:37 crc kubenswrapper[4701]: E1121 19:19:37.020899 4701 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.164:5001/podified-master-centos10/openstack-horizon:watcher_latest" Nov 21 19:19:37 crc kubenswrapper[4701]: E1121 19:19:37.021661 4701 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.164:5001/podified-master-centos10/openstack-horizon:watcher_latest" Nov 21 19:19:37 crc kubenswrapper[4701]: E1121 19:19:37.021861 4701 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:38.102.83.164:5001/podified-master-centos10/openstack-horizon:watcher_latest,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n698h5d7hddh55ch8bh5f8h549hdbh666h95h86h58ch5ddh5b6h5ddh99h645h56dh696h54bh5f5h598h97h68fh6fh7ch8bh656hd9h64bh558h664q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:yes,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6dfg9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-78dcd8d87c-pf5gj_openstack(efe8d5fc-c754-4243-bf18-af182791a0a4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 19:19:37 crc kubenswrapper[4701]: E1121 19:19:37.026827 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.164:5001/podified-master-centos10/openstack-horizon:watcher_latest\\\"\"]" pod="openstack/horizon-78dcd8d87c-pf5gj" podUID="efe8d5fc-c754-4243-bf18-af182791a0a4" Nov 21 19:19:38 crc kubenswrapper[4701]: E1121 19:19:38.746441 4701 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.164:5001/podified-master-centos10/openstack-placement-api:watcher_latest" Nov 21 19:19:38 crc kubenswrapper[4701]: E1121 19:19:38.746981 4701 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.164:5001/podified-master-centos10/openstack-placement-api:watcher_latest" Nov 21 19:19:38 crc kubenswrapper[4701]: E1121 19:19:38.747582 4701 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:placement-db-sync,Image:38.102.83.164:5001/podified-master-centos10/openstack-placement-api:watcher_latest,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/placement,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:placement-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gj5sj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42482,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-db-sync-kfsds_openstack(91518a76-c4e2-4f08-831a-aa8fb9d4778c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 19:19:38 crc kubenswrapper[4701]: E1121 19:19:38.749376 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/placement-db-sync-kfsds" podUID="91518a76-c4e2-4f08-831a-aa8fb9d4778c" Nov 21 19:19:38 crc kubenswrapper[4701]: E1121 19:19:38.771072 4701 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.164:5001/podified-master-centos10/openstack-horizon:watcher_latest" Nov 21 19:19:38 crc kubenswrapper[4701]: E1121 19:19:38.771166 4701 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.164:5001/podified-master-centos10/openstack-horizon:watcher_latest" Nov 21 19:19:38 crc kubenswrapper[4701]: E1121 19:19:38.771463 4701 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:38.102.83.164:5001/podified-master-centos10/openstack-horizon:watcher_latest,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n596h645h59ch647h58bh5c8h596h58h66fh96h695h7dh9ch86h555h5c9h5ch54h55fh585h98h648h5dbh7fh5b9h58fh5b9h658h56h5bfh9bh55cq,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:yes,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-smnds,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-74f5944bd9-kv77f_openstack(a2a434d9-d285-4564-aec6-592d14b749fa): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 19:19:38 crc kubenswrapper[4701]: E1121 19:19:38.776359 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.164:5001/podified-master-centos10/openstack-horizon:watcher_latest\\\"\"]" pod="openstack/horizon-74f5944bd9-kv77f" podUID="a2a434d9-d285-4564-aec6-592d14b749fa" Nov 21 19:19:38 crc kubenswrapper[4701]: E1121 19:19:38.787825 4701 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.164:5001/podified-master-centos10/openstack-horizon:watcher_latest" Nov 21 19:19:38 crc kubenswrapper[4701]: E1121 19:19:38.787905 4701 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.164:5001/podified-master-centos10/openstack-horizon:watcher_latest" Nov 21 19:19:38 crc kubenswrapper[4701]: E1121 19:19:38.788111 4701 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:38.102.83.164:5001/podified-master-centos10/openstack-horizon:watcher_latest,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5f5hb5h6dhb6h6fh59fh664h549h87h674h5d7h5f9h688h659h54chc5h5fh98h6bh685h68bh85hdhf5h677h565h5d6h5d8h547h556h66hf5q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:yes,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bnbp2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-565b678645-7jbt7_openstack(e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 19:19:38 crc kubenswrapper[4701]: E1121 19:19:38.792457 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.164:5001/podified-master-centos10/openstack-horizon:watcher_latest\\\"\"]" pod="openstack/horizon-565b678645-7jbt7" podUID="e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0" Nov 21 19:19:38 crc kubenswrapper[4701]: I1121 19:19:38.876567 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-b8v5x" Nov 21 19:19:39 crc kubenswrapper[4701]: I1121 19:19:39.019992 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6960ad8e-3a39-4a2f-98cf-448bf96e8362-scripts\") pod \"6960ad8e-3a39-4a2f-98cf-448bf96e8362\" (UID: \"6960ad8e-3a39-4a2f-98cf-448bf96e8362\") " Nov 21 19:19:39 crc kubenswrapper[4701]: I1121 19:19:39.020730 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6960ad8e-3a39-4a2f-98cf-448bf96e8362-config-data\") pod \"6960ad8e-3a39-4a2f-98cf-448bf96e8362\" (UID: \"6960ad8e-3a39-4a2f-98cf-448bf96e8362\") " Nov 21 19:19:39 crc kubenswrapper[4701]: I1121 19:19:39.020776 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wp96x\" (UniqueName: \"kubernetes.io/projected/6960ad8e-3a39-4a2f-98cf-448bf96e8362-kube-api-access-wp96x\") pod \"6960ad8e-3a39-4a2f-98cf-448bf96e8362\" (UID: \"6960ad8e-3a39-4a2f-98cf-448bf96e8362\") " Nov 21 19:19:39 crc kubenswrapper[4701]: I1121 19:19:39.020803 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6960ad8e-3a39-4a2f-98cf-448bf96e8362-fernet-keys\") pod \"6960ad8e-3a39-4a2f-98cf-448bf96e8362\" (UID: \"6960ad8e-3a39-4a2f-98cf-448bf96e8362\") " Nov 21 19:19:39 crc kubenswrapper[4701]: I1121 19:19:39.020952 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6960ad8e-3a39-4a2f-98cf-448bf96e8362-credential-keys\") pod \"6960ad8e-3a39-4a2f-98cf-448bf96e8362\" (UID: \"6960ad8e-3a39-4a2f-98cf-448bf96e8362\") " Nov 21 19:19:39 crc kubenswrapper[4701]: I1121 19:19:39.021056 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6960ad8e-3a39-4a2f-98cf-448bf96e8362-combined-ca-bundle\") pod \"6960ad8e-3a39-4a2f-98cf-448bf96e8362\" (UID: \"6960ad8e-3a39-4a2f-98cf-448bf96e8362\") " Nov 21 19:19:39 crc kubenswrapper[4701]: I1121 19:19:39.042402 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6960ad8e-3a39-4a2f-98cf-448bf96e8362-scripts" (OuterVolumeSpecName: "scripts") pod "6960ad8e-3a39-4a2f-98cf-448bf96e8362" (UID: "6960ad8e-3a39-4a2f-98cf-448bf96e8362"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:19:39 crc kubenswrapper[4701]: I1121 19:19:39.049051 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6960ad8e-3a39-4a2f-98cf-448bf96e8362-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "6960ad8e-3a39-4a2f-98cf-448bf96e8362" (UID: "6960ad8e-3a39-4a2f-98cf-448bf96e8362"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:19:39 crc kubenswrapper[4701]: I1121 19:19:39.049265 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6960ad8e-3a39-4a2f-98cf-448bf96e8362-kube-api-access-wp96x" (OuterVolumeSpecName: "kube-api-access-wp96x") pod "6960ad8e-3a39-4a2f-98cf-448bf96e8362" (UID: "6960ad8e-3a39-4a2f-98cf-448bf96e8362"). InnerVolumeSpecName "kube-api-access-wp96x". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:19:39 crc kubenswrapper[4701]: I1121 19:19:39.051867 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6960ad8e-3a39-4a2f-98cf-448bf96e8362-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "6960ad8e-3a39-4a2f-98cf-448bf96e8362" (UID: "6960ad8e-3a39-4a2f-98cf-448bf96e8362"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:19:39 crc kubenswrapper[4701]: I1121 19:19:39.066511 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6960ad8e-3a39-4a2f-98cf-448bf96e8362-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6960ad8e-3a39-4a2f-98cf-448bf96e8362" (UID: "6960ad8e-3a39-4a2f-98cf-448bf96e8362"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:19:39 crc kubenswrapper[4701]: I1121 19:19:39.088317 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6960ad8e-3a39-4a2f-98cf-448bf96e8362-config-data" (OuterVolumeSpecName: "config-data") pod "6960ad8e-3a39-4a2f-98cf-448bf96e8362" (UID: "6960ad8e-3a39-4a2f-98cf-448bf96e8362"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:19:39 crc kubenswrapper[4701]: I1121 19:19:39.124166 4701 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6960ad8e-3a39-4a2f-98cf-448bf96e8362-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:39 crc kubenswrapper[4701]: I1121 19:19:39.124219 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6960ad8e-3a39-4a2f-98cf-448bf96e8362-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:39 crc kubenswrapper[4701]: I1121 19:19:39.124234 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wp96x\" (UniqueName: \"kubernetes.io/projected/6960ad8e-3a39-4a2f-98cf-448bf96e8362-kube-api-access-wp96x\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:39 crc kubenswrapper[4701]: I1121 19:19:39.124250 4701 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6960ad8e-3a39-4a2f-98cf-448bf96e8362-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:39 crc kubenswrapper[4701]: I1121 19:19:39.124259 4701 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6960ad8e-3a39-4a2f-98cf-448bf96e8362-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:39 crc kubenswrapper[4701]: I1121 19:19:39.124279 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6960ad8e-3a39-4a2f-98cf-448bf96e8362-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:39 crc kubenswrapper[4701]: I1121 19:19:39.589804 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-b8v5x" event={"ID":"6960ad8e-3a39-4a2f-98cf-448bf96e8362","Type":"ContainerDied","Data":"2322b9f09440efebc1401fd5bb1a025c5d4cddcd740e8eeffe1b5778e870da39"} Nov 21 19:19:39 crc kubenswrapper[4701]: I1121 19:19:39.589882 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2322b9f09440efebc1401fd5bb1a025c5d4cddcd740e8eeffe1b5778e870da39" Nov 21 19:19:39 crc kubenswrapper[4701]: I1121 19:19:39.589991 4701 util.go:48] 
"No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-b8v5x" Nov 21 19:19:39 crc kubenswrapper[4701]: E1121 19:19:39.593379 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.164:5001/podified-master-centos10/openstack-placement-api:watcher_latest\\\"\"" pod="openstack/placement-db-sync-kfsds" podUID="91518a76-c4e2-4f08-831a-aa8fb9d4778c" Nov 21 19:19:39 crc kubenswrapper[4701]: I1121 19:19:39.987146 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-b8v5x"] Nov 21 19:19:39 crc kubenswrapper[4701]: I1121 19:19:39.995301 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-b8v5x"] Nov 21 19:19:40 crc kubenswrapper[4701]: I1121 19:19:40.083009 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-c699r"] Nov 21 19:19:40 crc kubenswrapper[4701]: E1121 19:19:40.083609 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6960ad8e-3a39-4a2f-98cf-448bf96e8362" containerName="keystone-bootstrap" Nov 21 19:19:40 crc kubenswrapper[4701]: I1121 19:19:40.083631 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="6960ad8e-3a39-4a2f-98cf-448bf96e8362" containerName="keystone-bootstrap" Nov 21 19:19:40 crc kubenswrapper[4701]: I1121 19:19:40.083875 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="6960ad8e-3a39-4a2f-98cf-448bf96e8362" containerName="keystone-bootstrap" Nov 21 19:19:40 crc kubenswrapper[4701]: I1121 19:19:40.084647 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-c699r" Nov 21 19:19:40 crc kubenswrapper[4701]: I1121 19:19:40.088176 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 21 19:19:40 crc kubenswrapper[4701]: I1121 19:19:40.088425 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 21 19:19:40 crc kubenswrapper[4701]: I1121 19:19:40.088613 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 21 19:19:40 crc kubenswrapper[4701]: I1121 19:19:40.088780 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 21 19:19:40 crc kubenswrapper[4701]: I1121 19:19:40.088849 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-27z5d" Nov 21 19:19:40 crc kubenswrapper[4701]: I1121 19:19:40.096512 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-c699r"] Nov 21 19:19:40 crc kubenswrapper[4701]: I1121 19:19:40.155065 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12e527f7-2e4d-421a-8639-e282a383774a-config-data\") pod \"keystone-bootstrap-c699r\" (UID: \"12e527f7-2e4d-421a-8639-e282a383774a\") " pod="openstack/keystone-bootstrap-c699r" Nov 21 19:19:40 crc kubenswrapper[4701]: I1121 19:19:40.155516 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2q6rp\" (UniqueName: \"kubernetes.io/projected/12e527f7-2e4d-421a-8639-e282a383774a-kube-api-access-2q6rp\") pod \"keystone-bootstrap-c699r\" (UID: \"12e527f7-2e4d-421a-8639-e282a383774a\") " 
pod="openstack/keystone-bootstrap-c699r" Nov 21 19:19:40 crc kubenswrapper[4701]: I1121 19:19:40.155692 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/12e527f7-2e4d-421a-8639-e282a383774a-fernet-keys\") pod \"keystone-bootstrap-c699r\" (UID: \"12e527f7-2e4d-421a-8639-e282a383774a\") " pod="openstack/keystone-bootstrap-c699r" Nov 21 19:19:40 crc kubenswrapper[4701]: I1121 19:19:40.155835 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/12e527f7-2e4d-421a-8639-e282a383774a-credential-keys\") pod \"keystone-bootstrap-c699r\" (UID: \"12e527f7-2e4d-421a-8639-e282a383774a\") " pod="openstack/keystone-bootstrap-c699r" Nov 21 19:19:40 crc kubenswrapper[4701]: I1121 19:19:40.155935 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12e527f7-2e4d-421a-8639-e282a383774a-combined-ca-bundle\") pod \"keystone-bootstrap-c699r\" (UID: \"12e527f7-2e4d-421a-8639-e282a383774a\") " pod="openstack/keystone-bootstrap-c699r" Nov 21 19:19:40 crc kubenswrapper[4701]: I1121 19:19:40.156026 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/12e527f7-2e4d-421a-8639-e282a383774a-scripts\") pod \"keystone-bootstrap-c699r\" (UID: \"12e527f7-2e4d-421a-8639-e282a383774a\") " pod="openstack/keystone-bootstrap-c699r" Nov 21 19:19:40 crc kubenswrapper[4701]: I1121 19:19:40.257965 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/12e527f7-2e4d-421a-8639-e282a383774a-fernet-keys\") pod \"keystone-bootstrap-c699r\" (UID: \"12e527f7-2e4d-421a-8639-e282a383774a\") " pod="openstack/keystone-bootstrap-c699r" Nov 21 19:19:40 crc kubenswrapper[4701]: I1121 19:19:40.258117 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/12e527f7-2e4d-421a-8639-e282a383774a-credential-keys\") pod \"keystone-bootstrap-c699r\" (UID: \"12e527f7-2e4d-421a-8639-e282a383774a\") " pod="openstack/keystone-bootstrap-c699r" Nov 21 19:19:40 crc kubenswrapper[4701]: I1121 19:19:40.258188 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12e527f7-2e4d-421a-8639-e282a383774a-combined-ca-bundle\") pod \"keystone-bootstrap-c699r\" (UID: \"12e527f7-2e4d-421a-8639-e282a383774a\") " pod="openstack/keystone-bootstrap-c699r" Nov 21 19:19:40 crc kubenswrapper[4701]: I1121 19:19:40.258251 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/12e527f7-2e4d-421a-8639-e282a383774a-scripts\") pod \"keystone-bootstrap-c699r\" (UID: \"12e527f7-2e4d-421a-8639-e282a383774a\") " pod="openstack/keystone-bootstrap-c699r" Nov 21 19:19:40 crc kubenswrapper[4701]: I1121 19:19:40.258325 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12e527f7-2e4d-421a-8639-e282a383774a-config-data\") pod \"keystone-bootstrap-c699r\" (UID: \"12e527f7-2e4d-421a-8639-e282a383774a\") " pod="openstack/keystone-bootstrap-c699r" Nov 21 19:19:40 crc kubenswrapper[4701]: I1121 
19:19:40.258428 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2q6rp\" (UniqueName: \"kubernetes.io/projected/12e527f7-2e4d-421a-8639-e282a383774a-kube-api-access-2q6rp\") pod \"keystone-bootstrap-c699r\" (UID: \"12e527f7-2e4d-421a-8639-e282a383774a\") " pod="openstack/keystone-bootstrap-c699r" Nov 21 19:19:40 crc kubenswrapper[4701]: I1121 19:19:40.273168 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/12e527f7-2e4d-421a-8639-e282a383774a-credential-keys\") pod \"keystone-bootstrap-c699r\" (UID: \"12e527f7-2e4d-421a-8639-e282a383774a\") " pod="openstack/keystone-bootstrap-c699r" Nov 21 19:19:40 crc kubenswrapper[4701]: I1121 19:19:40.273177 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12e527f7-2e4d-421a-8639-e282a383774a-combined-ca-bundle\") pod \"keystone-bootstrap-c699r\" (UID: \"12e527f7-2e4d-421a-8639-e282a383774a\") " pod="openstack/keystone-bootstrap-c699r" Nov 21 19:19:40 crc kubenswrapper[4701]: I1121 19:19:40.273298 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/12e527f7-2e4d-421a-8639-e282a383774a-fernet-keys\") pod \"keystone-bootstrap-c699r\" (UID: \"12e527f7-2e4d-421a-8639-e282a383774a\") " pod="openstack/keystone-bootstrap-c699r" Nov 21 19:19:40 crc kubenswrapper[4701]: I1121 19:19:40.273666 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12e527f7-2e4d-421a-8639-e282a383774a-config-data\") pod \"keystone-bootstrap-c699r\" (UID: \"12e527f7-2e4d-421a-8639-e282a383774a\") " pod="openstack/keystone-bootstrap-c699r" Nov 21 19:19:40 crc kubenswrapper[4701]: I1121 19:19:40.278543 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/12e527f7-2e4d-421a-8639-e282a383774a-scripts\") pod \"keystone-bootstrap-c699r\" (UID: \"12e527f7-2e4d-421a-8639-e282a383774a\") " pod="openstack/keystone-bootstrap-c699r" Nov 21 19:19:40 crc kubenswrapper[4701]: I1121 19:19:40.278859 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2q6rp\" (UniqueName: \"kubernetes.io/projected/12e527f7-2e4d-421a-8639-e282a383774a-kube-api-access-2q6rp\") pod \"keystone-bootstrap-c699r\" (UID: \"12e527f7-2e4d-421a-8639-e282a383774a\") " pod="openstack/keystone-bootstrap-c699r" Nov 21 19:19:40 crc kubenswrapper[4701]: I1121 19:19:40.412935 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-c699r" Nov 21 19:19:41 crc kubenswrapper[4701]: I1121 19:19:41.963804 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6960ad8e-3a39-4a2f-98cf-448bf96e8362" path="/var/lib/kubelet/pods/6960ad8e-3a39-4a2f-98cf-448bf96e8362/volumes" Nov 21 19:19:44 crc kubenswrapper[4701]: I1121 19:19:44.973331 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-85789dd45c-hpddc" podUID="e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.134:5353: i/o timeout" Nov 21 19:19:44 crc kubenswrapper[4701]: I1121 19:19:44.974025 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-85789dd45c-hpddc" Nov 21 19:19:46 crc kubenswrapper[4701]: E1121 19:19:46.504118 4701 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.164:5001/podified-master-centos10/openstack-glance-api:watcher_latest" Nov 21 19:19:46 crc kubenswrapper[4701]: E1121 19:19:46.504692 4701 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.164:5001/podified-master-centos10/openstack-glance-api:watcher_latest" Nov 21 19:19:46 crc kubenswrapper[4701]: E1121 19:19:46.504923 4701 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:glance-db-sync,Image:38.102.83.164:5001/podified-master-centos10/openstack-glance-api:watcher_latest,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/glance/glance.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kdv6s,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42415,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42415,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
glance-db-sync-sklns_openstack(87272c2c-3166-4a6a-aff9-41278b0b1b51): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 19:19:46 crc kubenswrapper[4701]: E1121 19:19:46.506273 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/glance-db-sync-sklns" podUID="87272c2c-3166-4a6a-aff9-41278b0b1b51" Nov 21 19:19:46 crc kubenswrapper[4701]: E1121 19:19:46.701149 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.164:5001/podified-master-centos10/openstack-glance-api:watcher_latest\\\"\"" pod="openstack/glance-db-sync-sklns" podUID="87272c2c-3166-4a6a-aff9-41278b0b1b51" Nov 21 19:19:48 crc kubenswrapper[4701]: I1121 19:19:48.724248 4701 generic.go:334] "Generic (PLEG): container finished" podID="1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1" containerID="52af84ab1204a8fa9b18e6eae42ba1f75dfc50729fcb446205319c006baf36a5" exitCode=0 Nov 21 19:19:48 crc kubenswrapper[4701]: I1121 19:19:48.724336 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-69n94" event={"ID":"1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1","Type":"ContainerDied","Data":"52af84ab1204a8fa9b18e6eae42ba1f75dfc50729fcb446205319c006baf36a5"} Nov 21 19:19:49 crc kubenswrapper[4701]: I1121 19:19:49.974093 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-85789dd45c-hpddc" podUID="e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.134:5353: i/o timeout" Nov 21 19:19:54 crc kubenswrapper[4701]: I1121 19:19:54.975287 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-85789dd45c-hpddc" podUID="e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.134:5353: i/o timeout" Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.818328 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-74f5944bd9-kv77f" event={"ID":"a2a434d9-d285-4564-aec6-592d14b749fa","Type":"ContainerDied","Data":"eab2f7a779ec53f6f32fdea5bde0091f39b98ab2b297986b0aa3cb521b667d25"} Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.818850 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eab2f7a779ec53f6f32fdea5bde0091f39b98ab2b297986b0aa3cb521b667d25" Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.821592 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-69n94" event={"ID":"1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1","Type":"ContainerDied","Data":"531af818dc604bf092c094c9fc5ed442cb34be6ef389f942eec249994bf328c9"} Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.821625 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="531af818dc604bf092c094c9fc5ed442cb34be6ef389f942eec249994bf328c9" Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.823243 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-565b678645-7jbt7" event={"ID":"e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0","Type":"ContainerDied","Data":"e3e1a9efa342b2284c2b56da11baf05ae8e3fa61227f8b65d7902cb51b359c7e"} Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.823267 4701 
pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e3e1a9efa342b2284c2b56da11baf05ae8e3fa61227f8b65d7902cb51b359c7e" Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.824677 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-78dcd8d87c-pf5gj" event={"ID":"efe8d5fc-c754-4243-bf18-af182791a0a4","Type":"ContainerDied","Data":"52e07a28d6e8c5ff900637a468d048a807723afffd024f55f5d07eb02cfb23e4"} Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.824703 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="52e07a28d6e8c5ff900637a468d048a807723afffd024f55f5d07eb02cfb23e4" Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.826850 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85789dd45c-hpddc" event={"ID":"e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef","Type":"ContainerDied","Data":"b32dfaf2982e236da1a92d2ff44312e9f6c21ef7487298721daf4eeac6e8d88a"} Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.826886 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b32dfaf2982e236da1a92d2ff44312e9f6c21ef7487298721daf4eeac6e8d88a" Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.837807 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-78dcd8d87c-pf5gj" Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.844612 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85789dd45c-hpddc" Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.853510 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-565b678645-7jbt7" Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.864966 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-74f5944bd9-kv77f" Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.875942 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-69n94" Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.979344 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0-config-data\") pod \"e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0\" (UID: \"e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0\") " Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.979396 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1-combined-ca-bundle\") pod \"1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1\" (UID: \"1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1\") " Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.979448 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef-config\") pod \"e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef\" (UID: \"e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef\") " Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.979519 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rht9m\" (UniqueName: \"kubernetes.io/projected/1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1-kube-api-access-rht9m\") pod \"1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1\" (UID: \"1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1\") " Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.979550 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/efe8d5fc-c754-4243-bf18-af182791a0a4-logs\") pod \"efe8d5fc-c754-4243-bf18-af182791a0a4\" (UID: \"efe8d5fc-c754-4243-bf18-af182791a0a4\") " Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.979635 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef-dns-svc\") pod \"e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef\" (UID: \"e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef\") " Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.979670 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a2a434d9-d285-4564-aec6-592d14b749fa-horizon-secret-key\") pod \"a2a434d9-d285-4564-aec6-592d14b749fa\" (UID: \"a2a434d9-d285-4564-aec6-592d14b749fa\") " Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.979696 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6dfg9\" (UniqueName: \"kubernetes.io/projected/efe8d5fc-c754-4243-bf18-af182791a0a4-kube-api-access-6dfg9\") pod \"efe8d5fc-c754-4243-bf18-af182791a0a4\" (UID: \"efe8d5fc-c754-4243-bf18-af182791a0a4\") " Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.979736 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fmtvj\" (UniqueName: \"kubernetes.io/projected/e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef-kube-api-access-fmtvj\") pod \"e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef\" (UID: \"e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef\") " Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.979760 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0-scripts\") pod \"e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0\" (UID: 
\"e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0\") " Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.979796 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0-logs\") pod \"e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0\" (UID: \"e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0\") " Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.979817 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/efe8d5fc-c754-4243-bf18-af182791a0a4-config-data\") pod \"efe8d5fc-c754-4243-bf18-af182791a0a4\" (UID: \"efe8d5fc-c754-4243-bf18-af182791a0a4\") " Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.979842 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef-ovsdbserver-sb\") pod \"e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef\" (UID: \"e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef\") " Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.979870 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-smnds\" (UniqueName: \"kubernetes.io/projected/a2a434d9-d285-4564-aec6-592d14b749fa-kube-api-access-smnds\") pod \"a2a434d9-d285-4564-aec6-592d14b749fa\" (UID: \"a2a434d9-d285-4564-aec6-592d14b749fa\") " Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.979895 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef-ovsdbserver-nb\") pod \"e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef\" (UID: \"e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef\") " Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.979915 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a2a434d9-d285-4564-aec6-592d14b749fa-logs\") pod \"a2a434d9-d285-4564-aec6-592d14b749fa\" (UID: \"a2a434d9-d285-4564-aec6-592d14b749fa\") " Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.979950 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a2a434d9-d285-4564-aec6-592d14b749fa-scripts\") pod \"a2a434d9-d285-4564-aec6-592d14b749fa\" (UID: \"a2a434d9-d285-4564-aec6-592d14b749fa\") " Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.979973 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef-dns-swift-storage-0\") pod \"e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef\" (UID: \"e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef\") " Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.980000 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0-horizon-secret-key\") pod \"e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0\" (UID: \"e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0\") " Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.980038 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/efe8d5fc-c754-4243-bf18-af182791a0a4-horizon-secret-key\") pod \"efe8d5fc-c754-4243-bf18-af182791a0a4\" (UID: 
\"efe8d5fc-c754-4243-bf18-af182791a0a4\") " Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.980070 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1-config\") pod \"1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1\" (UID: \"1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1\") " Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.980098 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/efe8d5fc-c754-4243-bf18-af182791a0a4-scripts\") pod \"efe8d5fc-c754-4243-bf18-af182791a0a4\" (UID: \"efe8d5fc-c754-4243-bf18-af182791a0a4\") " Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.980158 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a2a434d9-d285-4564-aec6-592d14b749fa-config-data\") pod \"a2a434d9-d285-4564-aec6-592d14b749fa\" (UID: \"a2a434d9-d285-4564-aec6-592d14b749fa\") " Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.980213 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bnbp2\" (UniqueName: \"kubernetes.io/projected/e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0-kube-api-access-bnbp2\") pod \"e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0\" (UID: \"e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0\") " Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.987672 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/efe8d5fc-c754-4243-bf18-af182791a0a4-logs" (OuterVolumeSpecName: "logs") pod "efe8d5fc-c754-4243-bf18-af182791a0a4" (UID: "efe8d5fc-c754-4243-bf18-af182791a0a4"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.987733 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0-scripts" (OuterVolumeSpecName: "scripts") pod "e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0" (UID: "e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.987950 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0-logs" (OuterVolumeSpecName: "logs") pod "e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0" (UID: "e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.988700 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0-config-data" (OuterVolumeSpecName: "config-data") pod "e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0" (UID: "e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.991480 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/efe8d5fc-c754-4243-bf18-af182791a0a4-config-data" (OuterVolumeSpecName: "config-data") pod "efe8d5fc-c754-4243-bf18-af182791a0a4" (UID: "efe8d5fc-c754-4243-bf18-af182791a0a4"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.993437 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/efe8d5fc-c754-4243-bf18-af182791a0a4-scripts" (OuterVolumeSpecName: "scripts") pod "efe8d5fc-c754-4243-bf18-af182791a0a4" (UID: "efe8d5fc-c754-4243-bf18-af182791a0a4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:19:55 crc kubenswrapper[4701]: I1121 19:19:55.993866 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a2a434d9-d285-4564-aec6-592d14b749fa-logs" (OuterVolumeSpecName: "logs") pod "a2a434d9-d285-4564-aec6-592d14b749fa" (UID: "a2a434d9-d285-4564-aec6-592d14b749fa"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.013314 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a2a434d9-d285-4564-aec6-592d14b749fa-config-data" (OuterVolumeSpecName: "config-data") pod "a2a434d9-d285-4564-aec6-592d14b749fa" (UID: "a2a434d9-d285-4564-aec6-592d14b749fa"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.017479 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a2a434d9-d285-4564-aec6-592d14b749fa-scripts" (OuterVolumeSpecName: "scripts") pod "a2a434d9-d285-4564-aec6-592d14b749fa" (UID: "a2a434d9-d285-4564-aec6-592d14b749fa"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.018615 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1-kube-api-access-rht9m" (OuterVolumeSpecName: "kube-api-access-rht9m") pod "1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1" (UID: "1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1"). InnerVolumeSpecName "kube-api-access-rht9m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.018637 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2a434d9-d285-4564-aec6-592d14b749fa-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "a2a434d9-d285-4564-aec6-592d14b749fa" (UID: "a2a434d9-d285-4564-aec6-592d14b749fa"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.018752 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0-kube-api-access-bnbp2" (OuterVolumeSpecName: "kube-api-access-bnbp2") pod "e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0" (UID: "e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0"). InnerVolumeSpecName "kube-api-access-bnbp2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.019163 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efe8d5fc-c754-4243-bf18-af182791a0a4-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "efe8d5fc-c754-4243-bf18-af182791a0a4" (UID: "efe8d5fc-c754-4243-bf18-af182791a0a4"). InnerVolumeSpecName "horizon-secret-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.019912 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef-kube-api-access-fmtvj" (OuterVolumeSpecName: "kube-api-access-fmtvj") pod "e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef" (UID: "e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef"). InnerVolumeSpecName "kube-api-access-fmtvj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.020225 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efe8d5fc-c754-4243-bf18-af182791a0a4-kube-api-access-6dfg9" (OuterVolumeSpecName: "kube-api-access-6dfg9") pod "efe8d5fc-c754-4243-bf18-af182791a0a4" (UID: "efe8d5fc-c754-4243-bf18-af182791a0a4"). InnerVolumeSpecName "kube-api-access-6dfg9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.022513 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0" (UID: "e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.025345 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a2a434d9-d285-4564-aec6-592d14b749fa-kube-api-access-smnds" (OuterVolumeSpecName: "kube-api-access-smnds") pod "a2a434d9-d285-4564-aec6-592d14b749fa" (UID: "a2a434d9-d285-4564-aec6-592d14b749fa"). InnerVolumeSpecName "kube-api-access-smnds". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.025447 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1" (UID: "1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.037333 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1-config" (OuterVolumeSpecName: "config") pod "1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1" (UID: "1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.075157 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef" (UID: "e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.081528 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef-config" (OuterVolumeSpecName: "config") pod "e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef" (UID: "e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.084024 4701 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0-logs\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.084088 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/efe8d5fc-c754-4243-bf18-af182791a0a4-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.084107 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-smnds\" (UniqueName: \"kubernetes.io/projected/a2a434d9-d285-4564-aec6-592d14b749fa-kube-api-access-smnds\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.084127 4701 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a2a434d9-d285-4564-aec6-592d14b749fa-logs\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.084147 4701 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a2a434d9-d285-4564-aec6-592d14b749fa-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.084163 4701 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.084180 4701 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/efe8d5fc-c754-4243-bf18-af182791a0a4-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.084195 4701 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.084252 4701 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/efe8d5fc-c754-4243-bf18-af182791a0a4-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.084269 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a2a434d9-d285-4564-aec6-592d14b749fa-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.084286 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bnbp2\" (UniqueName: \"kubernetes.io/projected/e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0-kube-api-access-bnbp2\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.084303 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.084321 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.084336 4701 
reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.084352 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rht9m\" (UniqueName: \"kubernetes.io/projected/1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1-kube-api-access-rht9m\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.084369 4701 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/efe8d5fc-c754-4243-bf18-af182791a0a4-logs\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.084385 4701 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.084401 4701 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a2a434d9-d285-4564-aec6-592d14b749fa-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.084420 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6dfg9\" (UniqueName: \"kubernetes.io/projected/efe8d5fc-c754-4243-bf18-af182791a0a4-kube-api-access-6dfg9\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.084437 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fmtvj\" (UniqueName: \"kubernetes.io/projected/e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef-kube-api-access-fmtvj\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.084453 4701 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.089095 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef" (UID: "e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.089433 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef" (UID: "e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.102803 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef" (UID: "e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.186994 4701 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.187481 4701 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.187491 4701 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.836692 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-565b678645-7jbt7" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.836742 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-74f5944bd9-kv77f" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.836773 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-69n94" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.836709 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-78dcd8d87c-pf5gj" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.837587 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85789dd45c-hpddc" Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.915130 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-74f5944bd9-kv77f"] Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.938544 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-74f5944bd9-kv77f"] Nov 21 19:19:56 crc kubenswrapper[4701]: I1121 19:19:56.989274 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-565b678645-7jbt7"] Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:56.998565 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-565b678645-7jbt7"] Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.034120 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-78dcd8d87c-pf5gj"] Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.045317 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-78dcd8d87c-pf5gj"] Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.064701 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-85789dd45c-hpddc"] Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.071114 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-85789dd45c-hpddc"] Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.243939 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-645bbc848c-zl97c"] Nov 21 19:19:57 crc kubenswrapper[4701]: E1121 19:19:57.244529 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef" containerName="init" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.244551 4701 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef" containerName="init" Nov 21 19:19:57 crc kubenswrapper[4701]: E1121 19:19:57.244571 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1" containerName="neutron-db-sync" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.244580 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1" containerName="neutron-db-sync" Nov 21 19:19:57 crc kubenswrapper[4701]: E1121 19:19:57.244593 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef" containerName="dnsmasq-dns" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.244600 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef" containerName="dnsmasq-dns" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.244784 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef" containerName="dnsmasq-dns" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.244800 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1" containerName="neutron-db-sync" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.246029 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-645bbc848c-zl97c" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.305321 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-645bbc848c-zl97c"] Nov 21 19:19:57 crc kubenswrapper[4701]: E1121 19:19:57.314434 4701 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.164:5001/podified-master-centos10/openstack-cinder-api:watcher_latest" Nov 21 19:19:57 crc kubenswrapper[4701]: E1121 19:19:57.314921 4701 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.164:5001/podified-master-centos10/openstack-cinder-api:watcher_latest" Nov 21 19:19:57 crc kubenswrapper[4701]: E1121 19:19:57.315269 4701 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:38.102.83.164:5001/podified-master-centos10/openstack-cinder-api:watcher_latest,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-vjwr4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-69chf_openstack(d5b93dd5-e6da-4f02-ac4d-b89773e967d3): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 19:19:57 crc kubenswrapper[4701]: E1121 19:19:57.317088 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-69chf" podUID="d5b93dd5-e6da-4f02-ac4d-b89773e967d3" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.324789 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b3255fe0-3c69-4ce9-a9e7-823c35dcebbe-config\") pod \"dnsmasq-dns-645bbc848c-zl97c\" (UID: \"b3255fe0-3c69-4ce9-a9e7-823c35dcebbe\") " pod="openstack/dnsmasq-dns-645bbc848c-zl97c" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.325182 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b3255fe0-3c69-4ce9-a9e7-823c35dcebbe-ovsdbserver-sb\") pod \"dnsmasq-dns-645bbc848c-zl97c\" (UID: \"b3255fe0-3c69-4ce9-a9e7-823c35dcebbe\") " pod="openstack/dnsmasq-dns-645bbc848c-zl97c" Nov 21 
19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.325386 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nbj2t\" (UniqueName: \"kubernetes.io/projected/b3255fe0-3c69-4ce9-a9e7-823c35dcebbe-kube-api-access-nbj2t\") pod \"dnsmasq-dns-645bbc848c-zl97c\" (UID: \"b3255fe0-3c69-4ce9-a9e7-823c35dcebbe\") " pod="openstack/dnsmasq-dns-645bbc848c-zl97c" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.325489 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b3255fe0-3c69-4ce9-a9e7-823c35dcebbe-dns-svc\") pod \"dnsmasq-dns-645bbc848c-zl97c\" (UID: \"b3255fe0-3c69-4ce9-a9e7-823c35dcebbe\") " pod="openstack/dnsmasq-dns-645bbc848c-zl97c" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.325539 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b3255fe0-3c69-4ce9-a9e7-823c35dcebbe-dns-swift-storage-0\") pod \"dnsmasq-dns-645bbc848c-zl97c\" (UID: \"b3255fe0-3c69-4ce9-a9e7-823c35dcebbe\") " pod="openstack/dnsmasq-dns-645bbc848c-zl97c" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.325569 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b3255fe0-3c69-4ce9-a9e7-823c35dcebbe-ovsdbserver-nb\") pod \"dnsmasq-dns-645bbc848c-zl97c\" (UID: \"b3255fe0-3c69-4ce9-a9e7-823c35dcebbe\") " pod="openstack/dnsmasq-dns-645bbc848c-zl97c" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.358148 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-8697b77d4d-84dj8"] Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.361026 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-8697b77d4d-84dj8" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.365340 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.365687 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-dd6b9" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.365897 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.366116 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.382860 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-8697b77d4d-84dj8"] Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.437184 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6084dcd4-0556-4a7d-b880-1979a9c36609-httpd-config\") pod \"neutron-8697b77d4d-84dj8\" (UID: \"6084dcd4-0556-4a7d-b880-1979a9c36609\") " pod="openstack/neutron-8697b77d4d-84dj8" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.438370 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/6084dcd4-0556-4a7d-b880-1979a9c36609-config\") pod \"neutron-8697b77d4d-84dj8\" (UID: \"6084dcd4-0556-4a7d-b880-1979a9c36609\") " pod="openstack/neutron-8697b77d4d-84dj8" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.438446 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6084dcd4-0556-4a7d-b880-1979a9c36609-combined-ca-bundle\") pod \"neutron-8697b77d4d-84dj8\" (UID: \"6084dcd4-0556-4a7d-b880-1979a9c36609\") " pod="openstack/neutron-8697b77d4d-84dj8" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.438771 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b3255fe0-3c69-4ce9-a9e7-823c35dcebbe-ovsdbserver-sb\") pod \"dnsmasq-dns-645bbc848c-zl97c\" (UID: \"b3255fe0-3c69-4ce9-a9e7-823c35dcebbe\") " pod="openstack/dnsmasq-dns-645bbc848c-zl97c" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.439001 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6084dcd4-0556-4a7d-b880-1979a9c36609-ovndb-tls-certs\") pod \"neutron-8697b77d4d-84dj8\" (UID: \"6084dcd4-0556-4a7d-b880-1979a9c36609\") " pod="openstack/neutron-8697b77d4d-84dj8" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.439075 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nbj2t\" (UniqueName: \"kubernetes.io/projected/b3255fe0-3c69-4ce9-a9e7-823c35dcebbe-kube-api-access-nbj2t\") pod \"dnsmasq-dns-645bbc848c-zl97c\" (UID: \"b3255fe0-3c69-4ce9-a9e7-823c35dcebbe\") " pod="openstack/dnsmasq-dns-645bbc848c-zl97c" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.439146 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b3255fe0-3c69-4ce9-a9e7-823c35dcebbe-dns-svc\") pod \"dnsmasq-dns-645bbc848c-zl97c\" (UID: 
\"b3255fe0-3c69-4ce9-a9e7-823c35dcebbe\") " pod="openstack/dnsmasq-dns-645bbc848c-zl97c" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.439237 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b3255fe0-3c69-4ce9-a9e7-823c35dcebbe-dns-swift-storage-0\") pod \"dnsmasq-dns-645bbc848c-zl97c\" (UID: \"b3255fe0-3c69-4ce9-a9e7-823c35dcebbe\") " pod="openstack/dnsmasq-dns-645bbc848c-zl97c" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.439274 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b3255fe0-3c69-4ce9-a9e7-823c35dcebbe-ovsdbserver-nb\") pod \"dnsmasq-dns-645bbc848c-zl97c\" (UID: \"b3255fe0-3c69-4ce9-a9e7-823c35dcebbe\") " pod="openstack/dnsmasq-dns-645bbc848c-zl97c" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.439329 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pp59s\" (UniqueName: \"kubernetes.io/projected/6084dcd4-0556-4a7d-b880-1979a9c36609-kube-api-access-pp59s\") pod \"neutron-8697b77d4d-84dj8\" (UID: \"6084dcd4-0556-4a7d-b880-1979a9c36609\") " pod="openstack/neutron-8697b77d4d-84dj8" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.439367 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b3255fe0-3c69-4ce9-a9e7-823c35dcebbe-config\") pod \"dnsmasq-dns-645bbc848c-zl97c\" (UID: \"b3255fe0-3c69-4ce9-a9e7-823c35dcebbe\") " pod="openstack/dnsmasq-dns-645bbc848c-zl97c" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.439824 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b3255fe0-3c69-4ce9-a9e7-823c35dcebbe-ovsdbserver-sb\") pod \"dnsmasq-dns-645bbc848c-zl97c\" (UID: \"b3255fe0-3c69-4ce9-a9e7-823c35dcebbe\") " pod="openstack/dnsmasq-dns-645bbc848c-zl97c" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.440486 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b3255fe0-3c69-4ce9-a9e7-823c35dcebbe-dns-svc\") pod \"dnsmasq-dns-645bbc848c-zl97c\" (UID: \"b3255fe0-3c69-4ce9-a9e7-823c35dcebbe\") " pod="openstack/dnsmasq-dns-645bbc848c-zl97c" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.440619 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b3255fe0-3c69-4ce9-a9e7-823c35dcebbe-config\") pod \"dnsmasq-dns-645bbc848c-zl97c\" (UID: \"b3255fe0-3c69-4ce9-a9e7-823c35dcebbe\") " pod="openstack/dnsmasq-dns-645bbc848c-zl97c" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.442142 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b3255fe0-3c69-4ce9-a9e7-823c35dcebbe-dns-swift-storage-0\") pod \"dnsmasq-dns-645bbc848c-zl97c\" (UID: \"b3255fe0-3c69-4ce9-a9e7-823c35dcebbe\") " pod="openstack/dnsmasq-dns-645bbc848c-zl97c" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.442432 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b3255fe0-3c69-4ce9-a9e7-823c35dcebbe-ovsdbserver-nb\") pod \"dnsmasq-dns-645bbc848c-zl97c\" (UID: \"b3255fe0-3c69-4ce9-a9e7-823c35dcebbe\") " pod="openstack/dnsmasq-dns-645bbc848c-zl97c" Nov 
21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.470801 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nbj2t\" (UniqueName: \"kubernetes.io/projected/b3255fe0-3c69-4ce9-a9e7-823c35dcebbe-kube-api-access-nbj2t\") pod \"dnsmasq-dns-645bbc848c-zl97c\" (UID: \"b3255fe0-3c69-4ce9-a9e7-823c35dcebbe\") " pod="openstack/dnsmasq-dns-645bbc848c-zl97c" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.541525 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6084dcd4-0556-4a7d-b880-1979a9c36609-httpd-config\") pod \"neutron-8697b77d4d-84dj8\" (UID: \"6084dcd4-0556-4a7d-b880-1979a9c36609\") " pod="openstack/neutron-8697b77d4d-84dj8" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.542032 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6084dcd4-0556-4a7d-b880-1979a9c36609-combined-ca-bundle\") pod \"neutron-8697b77d4d-84dj8\" (UID: \"6084dcd4-0556-4a7d-b880-1979a9c36609\") " pod="openstack/neutron-8697b77d4d-84dj8" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.542053 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/6084dcd4-0556-4a7d-b880-1979a9c36609-config\") pod \"neutron-8697b77d4d-84dj8\" (UID: \"6084dcd4-0556-4a7d-b880-1979a9c36609\") " pod="openstack/neutron-8697b77d4d-84dj8" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.542124 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6084dcd4-0556-4a7d-b880-1979a9c36609-ovndb-tls-certs\") pod \"neutron-8697b77d4d-84dj8\" (UID: \"6084dcd4-0556-4a7d-b880-1979a9c36609\") " pod="openstack/neutron-8697b77d4d-84dj8" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.542242 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pp59s\" (UniqueName: \"kubernetes.io/projected/6084dcd4-0556-4a7d-b880-1979a9c36609-kube-api-access-pp59s\") pod \"neutron-8697b77d4d-84dj8\" (UID: \"6084dcd4-0556-4a7d-b880-1979a9c36609\") " pod="openstack/neutron-8697b77d4d-84dj8" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.552165 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/6084dcd4-0556-4a7d-b880-1979a9c36609-config\") pod \"neutron-8697b77d4d-84dj8\" (UID: \"6084dcd4-0556-4a7d-b880-1979a9c36609\") " pod="openstack/neutron-8697b77d4d-84dj8" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.553647 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6084dcd4-0556-4a7d-b880-1979a9c36609-combined-ca-bundle\") pod \"neutron-8697b77d4d-84dj8\" (UID: \"6084dcd4-0556-4a7d-b880-1979a9c36609\") " pod="openstack/neutron-8697b77d4d-84dj8" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.558614 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6084dcd4-0556-4a7d-b880-1979a9c36609-ovndb-tls-certs\") pod \"neutron-8697b77d4d-84dj8\" (UID: \"6084dcd4-0556-4a7d-b880-1979a9c36609\") " pod="openstack/neutron-8697b77d4d-84dj8" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.565373 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pp59s\" 
(UniqueName: \"kubernetes.io/projected/6084dcd4-0556-4a7d-b880-1979a9c36609-kube-api-access-pp59s\") pod \"neutron-8697b77d4d-84dj8\" (UID: \"6084dcd4-0556-4a7d-b880-1979a9c36609\") " pod="openstack/neutron-8697b77d4d-84dj8" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.584052 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6084dcd4-0556-4a7d-b880-1979a9c36609-httpd-config\") pod \"neutron-8697b77d4d-84dj8\" (UID: \"6084dcd4-0556-4a7d-b880-1979a9c36609\") " pod="openstack/neutron-8697b77d4d-84dj8" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.730497 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-645bbc848c-zl97c" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.745927 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8697b77d4d-84dj8" Nov 21 19:19:57 crc kubenswrapper[4701]: E1121 19:19:57.941392 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.164:5001/podified-master-centos10/openstack-cinder-api:watcher_latest\\\"\"" pod="openstack/cinder-db-sync-69chf" podUID="d5b93dd5-e6da-4f02-ac4d-b89773e967d3" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.966113 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a2a434d9-d285-4564-aec6-592d14b749fa" path="/var/lib/kubelet/pods/a2a434d9-d285-4564-aec6-592d14b749fa/volumes" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.966624 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0" path="/var/lib/kubelet/pods/e7f999b9-6de9-45e8-b9a1-d5a9054c5cc0/volumes" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.972083 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef" path="/var/lib/kubelet/pods/e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef/volumes" Nov 21 19:19:57 crc kubenswrapper[4701]: I1121 19:19:57.973959 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efe8d5fc-c754-4243-bf18-af182791a0a4" path="/var/lib/kubelet/pods/efe8d5fc-c754-4243-bf18-af182791a0a4/volumes" Nov 21 19:19:58 crc kubenswrapper[4701]: I1121 19:19:58.258594 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 21 19:19:58 crc kubenswrapper[4701]: I1121 19:19:58.274147 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-applier-0"] Nov 21 19:19:58 crc kubenswrapper[4701]: I1121 19:19:58.287736 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-869574dbc6-l96tx"] Nov 21 19:19:58 crc kubenswrapper[4701]: I1121 19:19:58.304650 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Nov 21 19:19:58 crc kubenswrapper[4701]: W1121 19:19:58.309718 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7d8b1846_dcd5_49b4_8eb2_74b0462538e1.slice/crio-e6085745306ae128c098d380081a45da7f91c908e498738b1a07682baeff3854 WatchSource:0}: Error finding container e6085745306ae128c098d380081a45da7f91c908e498738b1a07682baeff3854: Status 404 returned error can't find the container with id e6085745306ae128c098d380081a45da7f91c908e498738b1a07682baeff3854 Nov 21 19:19:58 crc 
kubenswrapper[4701]: I1121 19:19:58.312443 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6c68b8ff68-tfcgs"] Nov 21 19:19:58 crc kubenswrapper[4701]: W1121 19:19:58.499654 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb3255fe0_3c69_4ce9_a9e7_823c35dcebbe.slice/crio-fb101576134e4e8d9df6cb6d744355fb6d78d3819313182c4fe1629502b7473a WatchSource:0}: Error finding container fb101576134e4e8d9df6cb6d744355fb6d78d3819313182c4fe1629502b7473a: Status 404 returned error can't find the container with id fb101576134e4e8d9df6cb6d744355fb6d78d3819313182c4fe1629502b7473a Nov 21 19:19:58 crc kubenswrapper[4701]: I1121 19:19:58.502038 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-645bbc848c-zl97c"] Nov 21 19:19:58 crc kubenswrapper[4701]: I1121 19:19:58.599311 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-c699r"] Nov 21 19:19:58 crc kubenswrapper[4701]: I1121 19:19:58.608832 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 21 19:19:58 crc kubenswrapper[4701]: I1121 19:19:58.947797 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"8a7a5be4-96a4-4574-9839-2d0576595305","Type":"ContainerStarted","Data":"92eefb8b5ec60ad5206795726c8f8f3ab22a42fcf3b93e43561d3c5d55629a69"} Nov 21 19:19:58 crc kubenswrapper[4701]: I1121 19:19:58.950510 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-869574dbc6-l96tx" event={"ID":"1c543587-173c-4fb2-b730-72b848f845d6","Type":"ContainerStarted","Data":"32e52f609acf5e70d5c372dfb6d4b8fec3c2f408b85ac21539daa39172bd37a5"} Nov 21 19:19:58 crc kubenswrapper[4701]: I1121 19:19:58.952300 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6c68b8ff68-tfcgs" event={"ID":"7d8b1846-dcd5-49b4-8eb2-74b0462538e1","Type":"ContainerStarted","Data":"e6085745306ae128c098d380081a45da7f91c908e498738b1a07682baeff3854"} Nov 21 19:19:58 crc kubenswrapper[4701]: I1121 19:19:58.953991 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-645bbc848c-zl97c" event={"ID":"b3255fe0-3c69-4ce9-a9e7-823c35dcebbe","Type":"ContainerStarted","Data":"fb101576134e4e8d9df6cb6d744355fb6d78d3819313182c4fe1629502b7473a"} Nov 21 19:19:58 crc kubenswrapper[4701]: I1121 19:19:58.955319 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-c699r" event={"ID":"12e527f7-2e4d-421a-8639-e282a383774a","Type":"ContainerStarted","Data":"185374c5bbb20891def6197ffb983ae87d964b626c53b65b0bd7244624a26a80"} Nov 21 19:19:58 crc kubenswrapper[4701]: I1121 19:19:58.962332 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"2bbe25c3-cbc9-45d6-aabe-a9b8e69d044f","Type":"ContainerStarted","Data":"c277097afa6cf8da7d71bec4769d76ce5d5823603ab69a5e9acbfce37ba3ccbe"} Nov 21 19:19:58 crc kubenswrapper[4701]: I1121 19:19:58.963941 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a","Type":"ContainerStarted","Data":"4a7fdd395a44c4caabd353dd21ac297e016c5c5ed4291c3548bae811ee696515"} Nov 21 19:19:59 crc kubenswrapper[4701]: I1121 19:19:59.309806 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-8697b77d4d-84dj8"] Nov 21 19:19:59 crc kubenswrapper[4701]: W1121 19:19:59.313931 4701 
manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6084dcd4_0556_4a7d_b880_1979a9c36609.slice/crio-6baa75c0a1957f76a0c5323943023519bfa2a1222f56a93100f7801082a125fb WatchSource:0}: Error finding container 6baa75c0a1957f76a0c5323943023519bfa2a1222f56a93100f7801082a125fb: Status 404 returned error can't find the container with id 6baa75c0a1957f76a0c5323943023519bfa2a1222f56a93100f7801082a125fb Nov 21 19:19:59 crc kubenswrapper[4701]: I1121 19:19:59.976017 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-85789dd45c-hpddc" podUID="e90a4e3a-8c3b-44f4-9df1-c36f3aeff9ef" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.134:5353: i/o timeout" Nov 21 19:19:59 crc kubenswrapper[4701]: I1121 19:19:59.976278 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-kfsds" event={"ID":"91518a76-c4e2-4f08-831a-aa8fb9d4778c","Type":"ContainerStarted","Data":"d7629fa548c4c840a30c8117231ecfbe33d71e948b82c9e7e915673b9ce0fd48"} Nov 21 19:19:59 crc kubenswrapper[4701]: I1121 19:19:59.981634 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffb1df83-0092-42e4-885f-e934786a503b","Type":"ContainerStarted","Data":"b4dc83dd71334cd762e2fee8ecd2a89fcc95bee70b97f3f0ec5203458453d5be"} Nov 21 19:19:59 crc kubenswrapper[4701]: I1121 19:19:59.983528 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-rp9lc" event={"ID":"0a93232c-afb8-4ff5-8775-5c3574997149","Type":"ContainerStarted","Data":"43627ff489f97e02acf7393717d1fff2e12c77e2238e7eabf64abba14d9a3ff8"} Nov 21 19:19:59 crc kubenswrapper[4701]: I1121 19:19:59.988888 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8697b77d4d-84dj8" event={"ID":"6084dcd4-0556-4a7d-b880-1979a9c36609","Type":"ContainerStarted","Data":"6baa75c0a1957f76a0c5323943023519bfa2a1222f56a93100f7801082a125fb"} Nov 21 19:20:00 crc kubenswrapper[4701]: I1121 19:20:00.694026 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5b76b98545-tv4h2"] Nov 21 19:20:00 crc kubenswrapper[4701]: I1121 19:20:00.696701 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5b76b98545-tv4h2" Nov 21 19:20:00 crc kubenswrapper[4701]: I1121 19:20:00.699101 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 21 19:20:00 crc kubenswrapper[4701]: I1121 19:20:00.705771 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 21 19:20:00 crc kubenswrapper[4701]: I1121 19:20:00.711895 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5b76b98545-tv4h2"] Nov 21 19:20:00 crc kubenswrapper[4701]: I1121 19:20:00.783372 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1fc0c9dc-fc55-43fb-a2bb-727c01863fb5-public-tls-certs\") pod \"neutron-5b76b98545-tv4h2\" (UID: \"1fc0c9dc-fc55-43fb-a2bb-727c01863fb5\") " pod="openstack/neutron-5b76b98545-tv4h2" Nov 21 19:20:00 crc kubenswrapper[4701]: I1121 19:20:00.783427 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1fc0c9dc-fc55-43fb-a2bb-727c01863fb5-config\") pod \"neutron-5b76b98545-tv4h2\" (UID: \"1fc0c9dc-fc55-43fb-a2bb-727c01863fb5\") " pod="openstack/neutron-5b76b98545-tv4h2" Nov 21 19:20:00 crc kubenswrapper[4701]: I1121 19:20:00.783451 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1fc0c9dc-fc55-43fb-a2bb-727c01863fb5-combined-ca-bundle\") pod \"neutron-5b76b98545-tv4h2\" (UID: \"1fc0c9dc-fc55-43fb-a2bb-727c01863fb5\") " pod="openstack/neutron-5b76b98545-tv4h2" Nov 21 19:20:00 crc kubenswrapper[4701]: I1121 19:20:00.783535 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1fc0c9dc-fc55-43fb-a2bb-727c01863fb5-internal-tls-certs\") pod \"neutron-5b76b98545-tv4h2\" (UID: \"1fc0c9dc-fc55-43fb-a2bb-727c01863fb5\") " pod="openstack/neutron-5b76b98545-tv4h2" Nov 21 19:20:00 crc kubenswrapper[4701]: I1121 19:20:00.783559 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1fc0c9dc-fc55-43fb-a2bb-727c01863fb5-ovndb-tls-certs\") pod \"neutron-5b76b98545-tv4h2\" (UID: \"1fc0c9dc-fc55-43fb-a2bb-727c01863fb5\") " pod="openstack/neutron-5b76b98545-tv4h2" Nov 21 19:20:00 crc kubenswrapper[4701]: I1121 19:20:00.783585 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qmvlv\" (UniqueName: \"kubernetes.io/projected/1fc0c9dc-fc55-43fb-a2bb-727c01863fb5-kube-api-access-qmvlv\") pod \"neutron-5b76b98545-tv4h2\" (UID: \"1fc0c9dc-fc55-43fb-a2bb-727c01863fb5\") " pod="openstack/neutron-5b76b98545-tv4h2" Nov 21 19:20:00 crc kubenswrapper[4701]: I1121 19:20:00.783614 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/1fc0c9dc-fc55-43fb-a2bb-727c01863fb5-httpd-config\") pod \"neutron-5b76b98545-tv4h2\" (UID: \"1fc0c9dc-fc55-43fb-a2bb-727c01863fb5\") " pod="openstack/neutron-5b76b98545-tv4h2" Nov 21 19:20:00 crc kubenswrapper[4701]: I1121 19:20:00.888790 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/1fc0c9dc-fc55-43fb-a2bb-727c01863fb5-internal-tls-certs\") pod \"neutron-5b76b98545-tv4h2\" (UID: \"1fc0c9dc-fc55-43fb-a2bb-727c01863fb5\") " pod="openstack/neutron-5b76b98545-tv4h2" Nov 21 19:20:00 crc kubenswrapper[4701]: I1121 19:20:00.888898 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1fc0c9dc-fc55-43fb-a2bb-727c01863fb5-ovndb-tls-certs\") pod \"neutron-5b76b98545-tv4h2\" (UID: \"1fc0c9dc-fc55-43fb-a2bb-727c01863fb5\") " pod="openstack/neutron-5b76b98545-tv4h2" Nov 21 19:20:00 crc kubenswrapper[4701]: I1121 19:20:00.888937 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qmvlv\" (UniqueName: \"kubernetes.io/projected/1fc0c9dc-fc55-43fb-a2bb-727c01863fb5-kube-api-access-qmvlv\") pod \"neutron-5b76b98545-tv4h2\" (UID: \"1fc0c9dc-fc55-43fb-a2bb-727c01863fb5\") " pod="openstack/neutron-5b76b98545-tv4h2" Nov 21 19:20:00 crc kubenswrapper[4701]: I1121 19:20:00.888973 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/1fc0c9dc-fc55-43fb-a2bb-727c01863fb5-httpd-config\") pod \"neutron-5b76b98545-tv4h2\" (UID: \"1fc0c9dc-fc55-43fb-a2bb-727c01863fb5\") " pod="openstack/neutron-5b76b98545-tv4h2" Nov 21 19:20:00 crc kubenswrapper[4701]: I1121 19:20:00.889012 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1fc0c9dc-fc55-43fb-a2bb-727c01863fb5-public-tls-certs\") pod \"neutron-5b76b98545-tv4h2\" (UID: \"1fc0c9dc-fc55-43fb-a2bb-727c01863fb5\") " pod="openstack/neutron-5b76b98545-tv4h2" Nov 21 19:20:00 crc kubenswrapper[4701]: I1121 19:20:00.889059 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1fc0c9dc-fc55-43fb-a2bb-727c01863fb5-config\") pod \"neutron-5b76b98545-tv4h2\" (UID: \"1fc0c9dc-fc55-43fb-a2bb-727c01863fb5\") " pod="openstack/neutron-5b76b98545-tv4h2" Nov 21 19:20:00 crc kubenswrapper[4701]: I1121 19:20:00.889427 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1fc0c9dc-fc55-43fb-a2bb-727c01863fb5-combined-ca-bundle\") pod \"neutron-5b76b98545-tv4h2\" (UID: \"1fc0c9dc-fc55-43fb-a2bb-727c01863fb5\") " pod="openstack/neutron-5b76b98545-tv4h2" Nov 21 19:20:00 crc kubenswrapper[4701]: I1121 19:20:00.902064 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1fc0c9dc-fc55-43fb-a2bb-727c01863fb5-internal-tls-certs\") pod \"neutron-5b76b98545-tv4h2\" (UID: \"1fc0c9dc-fc55-43fb-a2bb-727c01863fb5\") " pod="openstack/neutron-5b76b98545-tv4h2" Nov 21 19:20:00 crc kubenswrapper[4701]: I1121 19:20:00.902455 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/1fc0c9dc-fc55-43fb-a2bb-727c01863fb5-httpd-config\") pod \"neutron-5b76b98545-tv4h2\" (UID: \"1fc0c9dc-fc55-43fb-a2bb-727c01863fb5\") " pod="openstack/neutron-5b76b98545-tv4h2" Nov 21 19:20:00 crc kubenswrapper[4701]: I1121 19:20:00.902634 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1fc0c9dc-fc55-43fb-a2bb-727c01863fb5-ovndb-tls-certs\") pod \"neutron-5b76b98545-tv4h2\" (UID: 
\"1fc0c9dc-fc55-43fb-a2bb-727c01863fb5\") " pod="openstack/neutron-5b76b98545-tv4h2" Nov 21 19:20:00 crc kubenswrapper[4701]: I1121 19:20:00.903694 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1fc0c9dc-fc55-43fb-a2bb-727c01863fb5-public-tls-certs\") pod \"neutron-5b76b98545-tv4h2\" (UID: \"1fc0c9dc-fc55-43fb-a2bb-727c01863fb5\") " pod="openstack/neutron-5b76b98545-tv4h2" Nov 21 19:20:00 crc kubenswrapper[4701]: I1121 19:20:00.915193 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/1fc0c9dc-fc55-43fb-a2bb-727c01863fb5-config\") pod \"neutron-5b76b98545-tv4h2\" (UID: \"1fc0c9dc-fc55-43fb-a2bb-727c01863fb5\") " pod="openstack/neutron-5b76b98545-tv4h2" Nov 21 19:20:00 crc kubenswrapper[4701]: I1121 19:20:00.925072 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qmvlv\" (UniqueName: \"kubernetes.io/projected/1fc0c9dc-fc55-43fb-a2bb-727c01863fb5-kube-api-access-qmvlv\") pod \"neutron-5b76b98545-tv4h2\" (UID: \"1fc0c9dc-fc55-43fb-a2bb-727c01863fb5\") " pod="openstack/neutron-5b76b98545-tv4h2" Nov 21 19:20:00 crc kubenswrapper[4701]: I1121 19:20:00.939718 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1fc0c9dc-fc55-43fb-a2bb-727c01863fb5-combined-ca-bundle\") pod \"neutron-5b76b98545-tv4h2\" (UID: \"1fc0c9dc-fc55-43fb-a2bb-727c01863fb5\") " pod="openstack/neutron-5b76b98545-tv4h2" Nov 21 19:20:01 crc kubenswrapper[4701]: I1121 19:20:01.020314 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5b76b98545-tv4h2" Nov 21 19:20:01 crc kubenswrapper[4701]: I1121 19:20:01.037441 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8697b77d4d-84dj8" event={"ID":"6084dcd4-0556-4a7d-b880-1979a9c36609","Type":"ContainerStarted","Data":"72f3c9d31797acd8881ba733353698dc04c69993e3a991fd1273ac0a40cae63e"} Nov 21 19:20:01 crc kubenswrapper[4701]: I1121 19:20:01.037507 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8697b77d4d-84dj8" event={"ID":"6084dcd4-0556-4a7d-b880-1979a9c36609","Type":"ContainerStarted","Data":"daeb86b04da5d06629353e1d5951a542bbc5b3a3e4a0797d5922b2a056b643bb"} Nov 21 19:20:01 crc kubenswrapper[4701]: I1121 19:20:01.039380 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-8697b77d4d-84dj8" Nov 21 19:20:01 crc kubenswrapper[4701]: I1121 19:20:01.068621 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-8697b77d4d-84dj8" podStartSLOduration=4.068600753 podStartE2EDuration="4.068600753s" podCreationTimestamp="2025-11-21 19:19:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:20:01.060495657 +0000 UTC m=+1091.845635684" watchObservedRunningTime="2025-11-21 19:20:01.068600753 +0000 UTC m=+1091.853740780" Nov 21 19:20:01 crc kubenswrapper[4701]: I1121 19:20:01.123979 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a","Type":"ContainerStarted","Data":"4cb497b0052b985efea73a13783d29d28a1d0219bec6d49601b0ffb5c93a0efe"} Nov 21 19:20:01 crc kubenswrapper[4701]: I1121 19:20:01.124035 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/watcher-api-0" event={"ID":"bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a","Type":"ContainerStarted","Data":"7904f05470d37b18128384028bcbebca83b236367b317de53ac8c691079ab4e9"} Nov 21 19:20:01 crc kubenswrapper[4701]: I1121 19:20:01.124991 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Nov 21 19:20:01 crc kubenswrapper[4701]: I1121 19:20:01.202036 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-api-0" podStartSLOduration=35.202009553 podStartE2EDuration="35.202009553s" podCreationTimestamp="2025-11-21 19:19:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:20:01.191884492 +0000 UTC m=+1091.977024519" watchObservedRunningTime="2025-11-21 19:20:01.202009553 +0000 UTC m=+1091.987149580" Nov 21 19:20:01 crc kubenswrapper[4701]: I1121 19:20:01.202670 4701 generic.go:334] "Generic (PLEG): container finished" podID="b3255fe0-3c69-4ce9-a9e7-823c35dcebbe" containerID="334e96c104352661af00d662bbd12d44d1abd6d9768b71563c10d380ffc573a1" exitCode=0 Nov 21 19:20:01 crc kubenswrapper[4701]: I1121 19:20:01.202717 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-645bbc848c-zl97c" event={"ID":"b3255fe0-3c69-4ce9-a9e7-823c35dcebbe","Type":"ContainerDied","Data":"334e96c104352661af00d662bbd12d44d1abd6d9768b71563c10d380ffc573a1"} Nov 21 19:20:01 crc kubenswrapper[4701]: I1121 19:20:01.215770 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-c699r" event={"ID":"12e527f7-2e4d-421a-8639-e282a383774a","Type":"ContainerStarted","Data":"b698f1c37dc783e724d65f9ec1082a2372bce8259a478087fe49beb91cbea405"} Nov 21 19:20:01 crc kubenswrapper[4701]: I1121 19:20:01.303424 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-c699r" podStartSLOduration=21.303390566 podStartE2EDuration="21.303390566s" podCreationTimestamp="2025-11-21 19:19:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:20:01.29007259 +0000 UTC m=+1092.075212617" watchObservedRunningTime="2025-11-21 19:20:01.303390566 +0000 UTC m=+1092.088530593" Nov 21 19:20:01 crc kubenswrapper[4701]: I1121 19:20:01.348918 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-kfsds" podStartSLOduration=5.924796398 podStartE2EDuration="45.348896724s" podCreationTimestamp="2025-11-21 19:19:16 +0000 UTC" firstStartedPulling="2025-11-21 19:19:18.025258788 +0000 UTC m=+1048.810398815" lastFinishedPulling="2025-11-21 19:19:57.449359114 +0000 UTC m=+1088.234499141" observedRunningTime="2025-11-21 19:20:01.338087585 +0000 UTC m=+1092.123227612" watchObservedRunningTime="2025-11-21 19:20:01.348896724 +0000 UTC m=+1092.134036751" Nov 21 19:20:01 crc kubenswrapper[4701]: I1121 19:20:01.366510 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-rp9lc" podStartSLOduration=6.209781583 podStartE2EDuration="45.366494315s" podCreationTimestamp="2025-11-21 19:19:16 +0000 UTC" firstStartedPulling="2025-11-21 19:19:18.068744642 +0000 UTC m=+1048.853884669" lastFinishedPulling="2025-11-21 19:19:57.225457374 +0000 UTC m=+1088.010597401" observedRunningTime="2025-11-21 19:20:01.362803476 +0000 UTC m=+1092.147943503" watchObservedRunningTime="2025-11-21 
19:20:01.366494315 +0000 UTC m=+1092.151634342" Nov 21 19:20:01 crc kubenswrapper[4701]: I1121 19:20:01.430362 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Nov 21 19:20:02 crc kubenswrapper[4701]: I1121 19:20:02.228588 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-869574dbc6-l96tx" event={"ID":"1c543587-173c-4fb2-b730-72b848f845d6","Type":"ContainerStarted","Data":"430f8778df60ba3d059bc9aa9fa12d81c20d41994db5d5fd007530b6d67dbe5f"} Nov 21 19:20:02 crc kubenswrapper[4701]: I1121 19:20:02.238089 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6c68b8ff68-tfcgs" event={"ID":"7d8b1846-dcd5-49b4-8eb2-74b0462538e1","Type":"ContainerStarted","Data":"4a8b007513f75f88726dd3804667d037658c83b72291f3d90e7bfdf62b9c30ab"} Nov 21 19:20:02 crc kubenswrapper[4701]: I1121 19:20:02.238273 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6c68b8ff68-tfcgs" event={"ID":"7d8b1846-dcd5-49b4-8eb2-74b0462538e1","Type":"ContainerStarted","Data":"070ffba9890a61f311a6fd1e28f5b4001eda2f86636c0b2e78d3c8b4f22bcce0"} Nov 21 19:20:02 crc kubenswrapper[4701]: I1121 19:20:02.242935 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-645bbc848c-zl97c" event={"ID":"b3255fe0-3c69-4ce9-a9e7-823c35dcebbe","Type":"ContainerStarted","Data":"deac78af57938ba8c2702d53fab56213ab3eeaba278c20bd4e58e157fcca19ca"} Nov 21 19:20:02 crc kubenswrapper[4701]: I1121 19:20:02.243536 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-645bbc848c-zl97c" Nov 21 19:20:02 crc kubenswrapper[4701]: I1121 19:20:02.259532 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-6c68b8ff68-tfcgs" podStartSLOduration=34.976698615 podStartE2EDuration="37.259490321s" podCreationTimestamp="2025-11-21 19:19:25 +0000 UTC" firstStartedPulling="2025-11-21 19:19:58.333001061 +0000 UTC m=+1089.118141088" lastFinishedPulling="2025-11-21 19:20:00.615792767 +0000 UTC m=+1091.400932794" observedRunningTime="2025-11-21 19:20:02.256132901 +0000 UTC m=+1093.041272928" watchObservedRunningTime="2025-11-21 19:20:02.259490321 +0000 UTC m=+1093.044630358" Nov 21 19:20:02 crc kubenswrapper[4701]: I1121 19:20:02.287509 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-645bbc848c-zl97c" podStartSLOduration=5.28748143 podStartE2EDuration="5.28748143s" podCreationTimestamp="2025-11-21 19:19:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:20:02.272227422 +0000 UTC m=+1093.057367449" watchObservedRunningTime="2025-11-21 19:20:02.28748143 +0000 UTC m=+1093.072621457" Nov 21 19:20:02 crc kubenswrapper[4701]: I1121 19:20:02.713712 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5b76b98545-tv4h2"] Nov 21 19:20:03 crc kubenswrapper[4701]: I1121 19:20:03.253575 4701 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 21 19:20:04 crc kubenswrapper[4701]: I1121 19:20:04.249587 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0" Nov 21 19:20:05 crc kubenswrapper[4701]: I1121 19:20:05.764968 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-6c68b8ff68-tfcgs" Nov 21 19:20:05 crc kubenswrapper[4701]: I1121 19:20:05.765049 4701 kubelet.go:2542] 
"SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-6c68b8ff68-tfcgs" Nov 21 19:20:06 crc kubenswrapper[4701]: I1121 19:20:06.427170 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-api-0" Nov 21 19:20:06 crc kubenswrapper[4701]: I1121 19:20:06.435255 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-api-0" Nov 21 19:20:07 crc kubenswrapper[4701]: I1121 19:20:07.323910 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5b76b98545-tv4h2" event={"ID":"1fc0c9dc-fc55-43fb-a2bb-727c01863fb5","Type":"ContainerStarted","Data":"2c336470823e39bbe299f4d5a11e143bee651488b60842841ae27cbdc540905a"} Nov 21 19:20:07 crc kubenswrapper[4701]: I1121 19:20:07.334102 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0" Nov 21 19:20:07 crc kubenswrapper[4701]: I1121 19:20:07.732415 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-645bbc848c-zl97c" Nov 21 19:20:07 crc kubenswrapper[4701]: I1121 19:20:07.814612 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bd46686f5-2rps9"] Nov 21 19:20:07 crc kubenswrapper[4701]: I1121 19:20:07.814924 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5bd46686f5-2rps9" podUID="ae589118-c566-4aea-9183-6c67706aee1e" containerName="dnsmasq-dns" containerID="cri-o://29f2660b599054b49bbcdedef9a95527e7a187beb99e2ccada4764be2a7a69c0" gracePeriod=10 Nov 21 19:20:08 crc kubenswrapper[4701]: I1121 19:20:08.349675 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-sklns" event={"ID":"87272c2c-3166-4a6a-aff9-41278b0b1b51","Type":"ContainerStarted","Data":"7b177444e98fadf0cc0025209b7d78991c3bced6b8dad01552c748c345076aa4"} Nov 21 19:20:08 crc kubenswrapper[4701]: I1121 19:20:08.361313 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"8a7a5be4-96a4-4574-9839-2d0576595305","Type":"ContainerStarted","Data":"92827e25281b8993835100f95f59694457273f1eed9546f58a23850bd3b6a025"} Nov 21 19:20:08 crc kubenswrapper[4701]: I1121 19:20:08.385424 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-sklns" podStartSLOduration=3.728761061 podStartE2EDuration="49.385407287s" podCreationTimestamp="2025-11-21 19:19:19 +0000 UTC" firstStartedPulling="2025-11-21 19:19:21.36471451 +0000 UTC m=+1052.149854537" lastFinishedPulling="2025-11-21 19:20:07.021360736 +0000 UTC m=+1097.806500763" observedRunningTime="2025-11-21 19:20:08.378732038 +0000 UTC m=+1099.163872065" watchObservedRunningTime="2025-11-21 19:20:08.385407287 +0000 UTC m=+1099.170547314" Nov 21 19:20:08 crc kubenswrapper[4701]: I1121 19:20:08.390529 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-869574dbc6-l96tx" event={"ID":"1c543587-173c-4fb2-b730-72b848f845d6","Type":"ContainerStarted","Data":"bec4522f6cfd9c8b102a94c7b53c32fdf41063ea44f1a6d97edd1f49d43218f1"} Nov 21 19:20:08 crc kubenswrapper[4701]: I1121 19:20:08.398301 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-decision-engine-0" podStartSLOduration=33.650400124 podStartE2EDuration="42.398283592s" podCreationTimestamp="2025-11-21 19:19:26 +0000 UTC" firstStartedPulling="2025-11-21 19:19:58.304467607 +0000 UTC m=+1089.089607624" 
lastFinishedPulling="2025-11-21 19:20:07.052351065 +0000 UTC m=+1097.837491092" observedRunningTime="2025-11-21 19:20:08.39673126 +0000 UTC m=+1099.181871287" watchObservedRunningTime="2025-11-21 19:20:08.398283592 +0000 UTC m=+1099.183423619" Nov 21 19:20:08 crc kubenswrapper[4701]: I1121 19:20:08.427313 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffb1df83-0092-42e4-885f-e934786a503b","Type":"ContainerStarted","Data":"a57b2c46539d0f42fe1f128aed5b54f7ab4329c8a9a79057cdda67e3ad6eca93"} Nov 21 19:20:08 crc kubenswrapper[4701]: I1121 19:20:08.435972 4701 generic.go:334] "Generic (PLEG): container finished" podID="ae589118-c566-4aea-9183-6c67706aee1e" containerID="29f2660b599054b49bbcdedef9a95527e7a187beb99e2ccada4764be2a7a69c0" exitCode=0 Nov 21 19:20:08 crc kubenswrapper[4701]: I1121 19:20:08.436258 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bd46686f5-2rps9" event={"ID":"ae589118-c566-4aea-9183-6c67706aee1e","Type":"ContainerDied","Data":"29f2660b599054b49bbcdedef9a95527e7a187beb99e2ccada4764be2a7a69c0"} Nov 21 19:20:08 crc kubenswrapper[4701]: I1121 19:20:08.448138 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5b76b98545-tv4h2" event={"ID":"1fc0c9dc-fc55-43fb-a2bb-727c01863fb5","Type":"ContainerStarted","Data":"e5cd4673e7a5f414f549ccc09aefd5df11de824b2deae7e2b9a1370a92d7f3a2"} Nov 21 19:20:08 crc kubenswrapper[4701]: I1121 19:20:08.448193 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5b76b98545-tv4h2" event={"ID":"1fc0c9dc-fc55-43fb-a2bb-727c01863fb5","Type":"ContainerStarted","Data":"16d066c81a3d6fe66d0b36d66926bd8e4cdfcad5fc9c58a2e8294bd6c2a61dea"} Nov 21 19:20:08 crc kubenswrapper[4701]: I1121 19:20:08.448501 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-5b76b98545-tv4h2" Nov 21 19:20:08 crc kubenswrapper[4701]: I1121 19:20:08.477582 4701 generic.go:334] "Generic (PLEG): container finished" podID="12e527f7-2e4d-421a-8639-e282a383774a" containerID="b698f1c37dc783e724d65f9ec1082a2372bce8259a478087fe49beb91cbea405" exitCode=0 Nov 21 19:20:08 crc kubenswrapper[4701]: I1121 19:20:08.477704 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-c699r" event={"ID":"12e527f7-2e4d-421a-8639-e282a383774a","Type":"ContainerDied","Data":"b698f1c37dc783e724d65f9ec1082a2372bce8259a478087fe49beb91cbea405"} Nov 21 19:20:08 crc kubenswrapper[4701]: I1121 19:20:08.478463 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5bd46686f5-2rps9" Nov 21 19:20:08 crc kubenswrapper[4701]: I1121 19:20:08.484668 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"2bbe25c3-cbc9-45d6-aabe-a9b8e69d044f","Type":"ContainerStarted","Data":"925925361da106aa552127959f908696b2b8b19ef1df68e87c10982bd8c7d98e"} Nov 21 19:20:08 crc kubenswrapper[4701]: I1121 19:20:08.501367 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-869574dbc6-l96tx" podStartSLOduration=41.193988658 podStartE2EDuration="43.50134037s" podCreationTimestamp="2025-11-21 19:19:25 +0000 UTC" firstStartedPulling="2025-11-21 19:19:58.310087858 +0000 UTC m=+1089.095227885" lastFinishedPulling="2025-11-21 19:20:00.61743957 +0000 UTC m=+1091.402579597" observedRunningTime="2025-11-21 19:20:08.439533066 +0000 UTC m=+1099.224673093" watchObservedRunningTime="2025-11-21 19:20:08.50134037 +0000 UTC m=+1099.286480387" Nov 21 19:20:08 crc kubenswrapper[4701]: I1121 19:20:08.522353 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5b76b98545-tv4h2" podStartSLOduration=8.522334441 podStartE2EDuration="8.522334441s" podCreationTimestamp="2025-11-21 19:20:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:20:08.485925437 +0000 UTC m=+1099.271065464" watchObservedRunningTime="2025-11-21 19:20:08.522334441 +0000 UTC m=+1099.307474468" Nov 21 19:20:08 crc kubenswrapper[4701]: I1121 19:20:08.547253 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-applier-0" podStartSLOduration=33.793628505 podStartE2EDuration="42.547235407s" podCreationTimestamp="2025-11-21 19:19:26 +0000 UTC" firstStartedPulling="2025-11-21 19:19:58.268223537 +0000 UTC m=+1089.053363564" lastFinishedPulling="2025-11-21 19:20:07.021830439 +0000 UTC m=+1097.806970466" observedRunningTime="2025-11-21 19:20:08.536791658 +0000 UTC m=+1099.321931685" watchObservedRunningTime="2025-11-21 19:20:08.547235407 +0000 UTC m=+1099.332375434" Nov 21 19:20:08 crc kubenswrapper[4701]: I1121 19:20:08.574881 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae589118-c566-4aea-9183-6c67706aee1e-config\") pod \"ae589118-c566-4aea-9183-6c67706aee1e\" (UID: \"ae589118-c566-4aea-9183-6c67706aee1e\") " Nov 21 19:20:08 crc kubenswrapper[4701]: I1121 19:20:08.574973 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ae589118-c566-4aea-9183-6c67706aee1e-ovsdbserver-sb\") pod \"ae589118-c566-4aea-9183-6c67706aee1e\" (UID: \"ae589118-c566-4aea-9183-6c67706aee1e\") " Nov 21 19:20:08 crc kubenswrapper[4701]: I1121 19:20:08.575080 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ae589118-c566-4aea-9183-6c67706aee1e-ovsdbserver-nb\") pod \"ae589118-c566-4aea-9183-6c67706aee1e\" (UID: \"ae589118-c566-4aea-9183-6c67706aee1e\") " Nov 21 19:20:08 crc kubenswrapper[4701]: I1121 19:20:08.575104 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ae589118-c566-4aea-9183-6c67706aee1e-dns-svc\") pod \"ae589118-c566-4aea-9183-6c67706aee1e\" (UID: 
\"ae589118-c566-4aea-9183-6c67706aee1e\") " Nov 21 19:20:08 crc kubenswrapper[4701]: I1121 19:20:08.575176 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ae589118-c566-4aea-9183-6c67706aee1e-dns-swift-storage-0\") pod \"ae589118-c566-4aea-9183-6c67706aee1e\" (UID: \"ae589118-c566-4aea-9183-6c67706aee1e\") " Nov 21 19:20:08 crc kubenswrapper[4701]: I1121 19:20:08.575193 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-78dds\" (UniqueName: \"kubernetes.io/projected/ae589118-c566-4aea-9183-6c67706aee1e-kube-api-access-78dds\") pod \"ae589118-c566-4aea-9183-6c67706aee1e\" (UID: \"ae589118-c566-4aea-9183-6c67706aee1e\") " Nov 21 19:20:08 crc kubenswrapper[4701]: I1121 19:20:08.615168 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae589118-c566-4aea-9183-6c67706aee1e-kube-api-access-78dds" (OuterVolumeSpecName: "kube-api-access-78dds") pod "ae589118-c566-4aea-9183-6c67706aee1e" (UID: "ae589118-c566-4aea-9183-6c67706aee1e"). InnerVolumeSpecName "kube-api-access-78dds". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:20:08 crc kubenswrapper[4701]: I1121 19:20:08.638930 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ae589118-c566-4aea-9183-6c67706aee1e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ae589118-c566-4aea-9183-6c67706aee1e" (UID: "ae589118-c566-4aea-9183-6c67706aee1e"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:20:08 crc kubenswrapper[4701]: I1121 19:20:08.642247 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ae589118-c566-4aea-9183-6c67706aee1e-config" (OuterVolumeSpecName: "config") pod "ae589118-c566-4aea-9183-6c67706aee1e" (UID: "ae589118-c566-4aea-9183-6c67706aee1e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:20:08 crc kubenswrapper[4701]: I1121 19:20:08.650377 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ae589118-c566-4aea-9183-6c67706aee1e-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "ae589118-c566-4aea-9183-6c67706aee1e" (UID: "ae589118-c566-4aea-9183-6c67706aee1e"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:20:08 crc kubenswrapper[4701]: I1121 19:20:08.655783 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ae589118-c566-4aea-9183-6c67706aee1e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ae589118-c566-4aea-9183-6c67706aee1e" (UID: "ae589118-c566-4aea-9183-6c67706aee1e"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:20:08 crc kubenswrapper[4701]: I1121 19:20:08.679446 4701 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ae589118-c566-4aea-9183-6c67706aee1e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:08 crc kubenswrapper[4701]: I1121 19:20:08.679645 4701 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ae589118-c566-4aea-9183-6c67706aee1e-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:08 crc kubenswrapper[4701]: I1121 19:20:08.679704 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-78dds\" (UniqueName: \"kubernetes.io/projected/ae589118-c566-4aea-9183-6c67706aee1e-kube-api-access-78dds\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:08 crc kubenswrapper[4701]: I1121 19:20:08.679764 4701 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ae589118-c566-4aea-9183-6c67706aee1e-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:08 crc kubenswrapper[4701]: I1121 19:20:08.679814 4701 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae589118-c566-4aea-9183-6c67706aee1e-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:08 crc kubenswrapper[4701]: I1121 19:20:08.722413 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ae589118-c566-4aea-9183-6c67706aee1e-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ae589118-c566-4aea-9183-6c67706aee1e" (UID: "ae589118-c566-4aea-9183-6c67706aee1e"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:20:08 crc kubenswrapper[4701]: I1121 19:20:08.781843 4701 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ae589118-c566-4aea-9183-6c67706aee1e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:09 crc kubenswrapper[4701]: I1121 19:20:09.496160 4701 generic.go:334] "Generic (PLEG): container finished" podID="91518a76-c4e2-4f08-831a-aa8fb9d4778c" containerID="d7629fa548c4c840a30c8117231ecfbe33d71e948b82c9e7e915673b9ce0fd48" exitCode=0 Nov 21 19:20:09 crc kubenswrapper[4701]: I1121 19:20:09.496303 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-kfsds" event={"ID":"91518a76-c4e2-4f08-831a-aa8fb9d4778c","Type":"ContainerDied","Data":"d7629fa548c4c840a30c8117231ecfbe33d71e948b82c9e7e915673b9ce0fd48"} Nov 21 19:20:09 crc kubenswrapper[4701]: I1121 19:20:09.511531 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bd46686f5-2rps9" event={"ID":"ae589118-c566-4aea-9183-6c67706aee1e","Type":"ContainerDied","Data":"231beea94d8b876f138d80b95a2c1258da0698073ecf48b900c30e687ed3860b"} Nov 21 19:20:09 crc kubenswrapper[4701]: I1121 19:20:09.511623 4701 scope.go:117] "RemoveContainer" containerID="29f2660b599054b49bbcdedef9a95527e7a187beb99e2ccada4764be2a7a69c0" Nov 21 19:20:09 crc kubenswrapper[4701]: I1121 19:20:09.511641 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5bd46686f5-2rps9" Nov 21 19:20:09 crc kubenswrapper[4701]: I1121 19:20:09.557775 4701 scope.go:117] "RemoveContainer" containerID="4a3c951b2d7618481f56abc802b8e06158e81065deb3977eb8e05e5160205595" Nov 21 19:20:09 crc kubenswrapper[4701]: I1121 19:20:09.557932 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bd46686f5-2rps9"] Nov 21 19:20:09 crc kubenswrapper[4701]: I1121 19:20:09.572651 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5bd46686f5-2rps9"] Nov 21 19:20:09 crc kubenswrapper[4701]: I1121 19:20:09.997247 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-c699r" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.008339 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae589118-c566-4aea-9183-6c67706aee1e" path="/var/lib/kubelet/pods/ae589118-c566-4aea-9183-6c67706aee1e/volumes" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.127923 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12e527f7-2e4d-421a-8639-e282a383774a-combined-ca-bundle\") pod \"12e527f7-2e4d-421a-8639-e282a383774a\" (UID: \"12e527f7-2e4d-421a-8639-e282a383774a\") " Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.128005 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/12e527f7-2e4d-421a-8639-e282a383774a-fernet-keys\") pod \"12e527f7-2e4d-421a-8639-e282a383774a\" (UID: \"12e527f7-2e4d-421a-8639-e282a383774a\") " Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.128110 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12e527f7-2e4d-421a-8639-e282a383774a-config-data\") pod \"12e527f7-2e4d-421a-8639-e282a383774a\" (UID: \"12e527f7-2e4d-421a-8639-e282a383774a\") " Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.128138 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/12e527f7-2e4d-421a-8639-e282a383774a-scripts\") pod \"12e527f7-2e4d-421a-8639-e282a383774a\" (UID: \"12e527f7-2e4d-421a-8639-e282a383774a\") " Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.128232 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2q6rp\" (UniqueName: \"kubernetes.io/projected/12e527f7-2e4d-421a-8639-e282a383774a-kube-api-access-2q6rp\") pod \"12e527f7-2e4d-421a-8639-e282a383774a\" (UID: \"12e527f7-2e4d-421a-8639-e282a383774a\") " Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.128260 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/12e527f7-2e4d-421a-8639-e282a383774a-credential-keys\") pod \"12e527f7-2e4d-421a-8639-e282a383774a\" (UID: \"12e527f7-2e4d-421a-8639-e282a383774a\") " Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.139413 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12e527f7-2e4d-421a-8639-e282a383774a-kube-api-access-2q6rp" (OuterVolumeSpecName: "kube-api-access-2q6rp") pod "12e527f7-2e4d-421a-8639-e282a383774a" (UID: "12e527f7-2e4d-421a-8639-e282a383774a"). InnerVolumeSpecName "kube-api-access-2q6rp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.139934 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12e527f7-2e4d-421a-8639-e282a383774a-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "12e527f7-2e4d-421a-8639-e282a383774a" (UID: "12e527f7-2e4d-421a-8639-e282a383774a"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.155965 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12e527f7-2e4d-421a-8639-e282a383774a-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "12e527f7-2e4d-421a-8639-e282a383774a" (UID: "12e527f7-2e4d-421a-8639-e282a383774a"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.177409 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12e527f7-2e4d-421a-8639-e282a383774a-scripts" (OuterVolumeSpecName: "scripts") pod "12e527f7-2e4d-421a-8639-e282a383774a" (UID: "12e527f7-2e4d-421a-8639-e282a383774a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.190365 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12e527f7-2e4d-421a-8639-e282a383774a-config-data" (OuterVolumeSpecName: "config-data") pod "12e527f7-2e4d-421a-8639-e282a383774a" (UID: "12e527f7-2e4d-421a-8639-e282a383774a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.208294 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12e527f7-2e4d-421a-8639-e282a383774a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "12e527f7-2e4d-421a-8639-e282a383774a" (UID: "12e527f7-2e4d-421a-8639-e282a383774a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.229718 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2q6rp\" (UniqueName: \"kubernetes.io/projected/12e527f7-2e4d-421a-8639-e282a383774a-kube-api-access-2q6rp\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.229753 4701 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/12e527f7-2e4d-421a-8639-e282a383774a-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.229766 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12e527f7-2e4d-421a-8639-e282a383774a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.229775 4701 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/12e527f7-2e4d-421a-8639-e282a383774a-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.229783 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12e527f7-2e4d-421a-8639-e282a383774a-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.229793 4701 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/12e527f7-2e4d-421a-8639-e282a383774a-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.566330 4701 generic.go:334] "Generic (PLEG): container finished" podID="0a93232c-afb8-4ff5-8775-5c3574997149" containerID="43627ff489f97e02acf7393717d1fff2e12c77e2238e7eabf64abba14d9a3ff8" exitCode=0 Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.566429 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-rp9lc" event={"ID":"0a93232c-afb8-4ff5-8775-5c3574997149","Type":"ContainerDied","Data":"43627ff489f97e02acf7393717d1fff2e12c77e2238e7eabf64abba14d9a3ff8"} Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.571716 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-c699r" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.571814 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-c699r" event={"ID":"12e527f7-2e4d-421a-8639-e282a383774a","Type":"ContainerDied","Data":"185374c5bbb20891def6197ffb983ae87d964b626c53b65b0bd7244624a26a80"} Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.573443 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="185374c5bbb20891def6197ffb983ae87d964b626c53b65b0bd7244624a26a80" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.708472 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-65d76b5c54-c9d89"] Nov 21 19:20:10 crc kubenswrapper[4701]: E1121 19:20:10.709027 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12e527f7-2e4d-421a-8639-e282a383774a" containerName="keystone-bootstrap" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.709050 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="12e527f7-2e4d-421a-8639-e282a383774a" containerName="keystone-bootstrap" Nov 21 19:20:10 crc kubenswrapper[4701]: E1121 19:20:10.709071 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae589118-c566-4aea-9183-6c67706aee1e" containerName="dnsmasq-dns" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.709078 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae589118-c566-4aea-9183-6c67706aee1e" containerName="dnsmasq-dns" Nov 21 19:20:10 crc kubenswrapper[4701]: E1121 19:20:10.709090 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae589118-c566-4aea-9183-6c67706aee1e" containerName="init" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.709097 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae589118-c566-4aea-9183-6c67706aee1e" containerName="init" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.713836 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="12e527f7-2e4d-421a-8639-e282a383774a" containerName="keystone-bootstrap" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.713925 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae589118-c566-4aea-9183-6c67706aee1e" containerName="dnsmasq-dns" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.715107 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-65d76b5c54-c9d89" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.721659 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-65d76b5c54-c9d89"] Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.723621 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.723788 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.723959 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-27z5d" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.724071 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.724173 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.725450 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.756948 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/dee81498-90dd-46c0-949f-c3de3b9bfbd3-fernet-keys\") pod \"keystone-65d76b5c54-c9d89\" (UID: \"dee81498-90dd-46c0-949f-c3de3b9bfbd3\") " pod="openstack/keystone-65d76b5c54-c9d89" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.756990 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dee81498-90dd-46c0-949f-c3de3b9bfbd3-combined-ca-bundle\") pod \"keystone-65d76b5c54-c9d89\" (UID: \"dee81498-90dd-46c0-949f-c3de3b9bfbd3\") " pod="openstack/keystone-65d76b5c54-c9d89" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.757063 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dee81498-90dd-46c0-949f-c3de3b9bfbd3-config-data\") pod \"keystone-65d76b5c54-c9d89\" (UID: \"dee81498-90dd-46c0-949f-c3de3b9bfbd3\") " pod="openstack/keystone-65d76b5c54-c9d89" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.757087 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6mnml\" (UniqueName: \"kubernetes.io/projected/dee81498-90dd-46c0-949f-c3de3b9bfbd3-kube-api-access-6mnml\") pod \"keystone-65d76b5c54-c9d89\" (UID: \"dee81498-90dd-46c0-949f-c3de3b9bfbd3\") " pod="openstack/keystone-65d76b5c54-c9d89" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.757108 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/dee81498-90dd-46c0-949f-c3de3b9bfbd3-credential-keys\") pod \"keystone-65d76b5c54-c9d89\" (UID: \"dee81498-90dd-46c0-949f-c3de3b9bfbd3\") " pod="openstack/keystone-65d76b5c54-c9d89" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.757145 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/dee81498-90dd-46c0-949f-c3de3b9bfbd3-public-tls-certs\") pod \"keystone-65d76b5c54-c9d89\" (UID: 
\"dee81498-90dd-46c0-949f-c3de3b9bfbd3\") " pod="openstack/keystone-65d76b5c54-c9d89" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.764949 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dee81498-90dd-46c0-949f-c3de3b9bfbd3-scripts\") pod \"keystone-65d76b5c54-c9d89\" (UID: \"dee81498-90dd-46c0-949f-c3de3b9bfbd3\") " pod="openstack/keystone-65d76b5c54-c9d89" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.765094 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/dee81498-90dd-46c0-949f-c3de3b9bfbd3-internal-tls-certs\") pod \"keystone-65d76b5c54-c9d89\" (UID: \"dee81498-90dd-46c0-949f-c3de3b9bfbd3\") " pod="openstack/keystone-65d76b5c54-c9d89" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.868715 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/dee81498-90dd-46c0-949f-c3de3b9bfbd3-fernet-keys\") pod \"keystone-65d76b5c54-c9d89\" (UID: \"dee81498-90dd-46c0-949f-c3de3b9bfbd3\") " pod="openstack/keystone-65d76b5c54-c9d89" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.869115 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dee81498-90dd-46c0-949f-c3de3b9bfbd3-combined-ca-bundle\") pod \"keystone-65d76b5c54-c9d89\" (UID: \"dee81498-90dd-46c0-949f-c3de3b9bfbd3\") " pod="openstack/keystone-65d76b5c54-c9d89" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.869184 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dee81498-90dd-46c0-949f-c3de3b9bfbd3-config-data\") pod \"keystone-65d76b5c54-c9d89\" (UID: \"dee81498-90dd-46c0-949f-c3de3b9bfbd3\") " pod="openstack/keystone-65d76b5c54-c9d89" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.869231 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6mnml\" (UniqueName: \"kubernetes.io/projected/dee81498-90dd-46c0-949f-c3de3b9bfbd3-kube-api-access-6mnml\") pod \"keystone-65d76b5c54-c9d89\" (UID: \"dee81498-90dd-46c0-949f-c3de3b9bfbd3\") " pod="openstack/keystone-65d76b5c54-c9d89" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.869257 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/dee81498-90dd-46c0-949f-c3de3b9bfbd3-credential-keys\") pod \"keystone-65d76b5c54-c9d89\" (UID: \"dee81498-90dd-46c0-949f-c3de3b9bfbd3\") " pod="openstack/keystone-65d76b5c54-c9d89" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.869294 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/dee81498-90dd-46c0-949f-c3de3b9bfbd3-public-tls-certs\") pod \"keystone-65d76b5c54-c9d89\" (UID: \"dee81498-90dd-46c0-949f-c3de3b9bfbd3\") " pod="openstack/keystone-65d76b5c54-c9d89" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.869339 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dee81498-90dd-46c0-949f-c3de3b9bfbd3-scripts\") pod \"keystone-65d76b5c54-c9d89\" (UID: \"dee81498-90dd-46c0-949f-c3de3b9bfbd3\") " pod="openstack/keystone-65d76b5c54-c9d89" Nov 21 
19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.869369 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/dee81498-90dd-46c0-949f-c3de3b9bfbd3-internal-tls-certs\") pod \"keystone-65d76b5c54-c9d89\" (UID: \"dee81498-90dd-46c0-949f-c3de3b9bfbd3\") " pod="openstack/keystone-65d76b5c54-c9d89" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.878826 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dee81498-90dd-46c0-949f-c3de3b9bfbd3-combined-ca-bundle\") pod \"keystone-65d76b5c54-c9d89\" (UID: \"dee81498-90dd-46c0-949f-c3de3b9bfbd3\") " pod="openstack/keystone-65d76b5c54-c9d89" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.880005 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dee81498-90dd-46c0-949f-c3de3b9bfbd3-config-data\") pod \"keystone-65d76b5c54-c9d89\" (UID: \"dee81498-90dd-46c0-949f-c3de3b9bfbd3\") " pod="openstack/keystone-65d76b5c54-c9d89" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.883515 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/dee81498-90dd-46c0-949f-c3de3b9bfbd3-internal-tls-certs\") pod \"keystone-65d76b5c54-c9d89\" (UID: \"dee81498-90dd-46c0-949f-c3de3b9bfbd3\") " pod="openstack/keystone-65d76b5c54-c9d89" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.890143 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dee81498-90dd-46c0-949f-c3de3b9bfbd3-scripts\") pod \"keystone-65d76b5c54-c9d89\" (UID: \"dee81498-90dd-46c0-949f-c3de3b9bfbd3\") " pod="openstack/keystone-65d76b5c54-c9d89" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.893786 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/dee81498-90dd-46c0-949f-c3de3b9bfbd3-fernet-keys\") pod \"keystone-65d76b5c54-c9d89\" (UID: \"dee81498-90dd-46c0-949f-c3de3b9bfbd3\") " pod="openstack/keystone-65d76b5c54-c9d89" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.912926 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6mnml\" (UniqueName: \"kubernetes.io/projected/dee81498-90dd-46c0-949f-c3de3b9bfbd3-kube-api-access-6mnml\") pod \"keystone-65d76b5c54-c9d89\" (UID: \"dee81498-90dd-46c0-949f-c3de3b9bfbd3\") " pod="openstack/keystone-65d76b5c54-c9d89" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.914314 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/dee81498-90dd-46c0-949f-c3de3b9bfbd3-public-tls-certs\") pod \"keystone-65d76b5c54-c9d89\" (UID: \"dee81498-90dd-46c0-949f-c3de3b9bfbd3\") " pod="openstack/keystone-65d76b5c54-c9d89" Nov 21 19:20:10 crc kubenswrapper[4701]: I1121 19:20:10.915856 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/dee81498-90dd-46c0-949f-c3de3b9bfbd3-credential-keys\") pod \"keystone-65d76b5c54-c9d89\" (UID: \"dee81498-90dd-46c0-949f-c3de3b9bfbd3\") " pod="openstack/keystone-65d76b5c54-c9d89" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.025800 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-kfsds" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.062096 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-65d76b5c54-c9d89" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.175500 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/91518a76-c4e2-4f08-831a-aa8fb9d4778c-scripts\") pod \"91518a76-c4e2-4f08-831a-aa8fb9d4778c\" (UID: \"91518a76-c4e2-4f08-831a-aa8fb9d4778c\") " Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.175709 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91518a76-c4e2-4f08-831a-aa8fb9d4778c-config-data\") pod \"91518a76-c4e2-4f08-831a-aa8fb9d4778c\" (UID: \"91518a76-c4e2-4f08-831a-aa8fb9d4778c\") " Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.175756 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gj5sj\" (UniqueName: \"kubernetes.io/projected/91518a76-c4e2-4f08-831a-aa8fb9d4778c-kube-api-access-gj5sj\") pod \"91518a76-c4e2-4f08-831a-aa8fb9d4778c\" (UID: \"91518a76-c4e2-4f08-831a-aa8fb9d4778c\") " Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.175843 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91518a76-c4e2-4f08-831a-aa8fb9d4778c-combined-ca-bundle\") pod \"91518a76-c4e2-4f08-831a-aa8fb9d4778c\" (UID: \"91518a76-c4e2-4f08-831a-aa8fb9d4778c\") " Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.175920 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/91518a76-c4e2-4f08-831a-aa8fb9d4778c-logs\") pod \"91518a76-c4e2-4f08-831a-aa8fb9d4778c\" (UID: \"91518a76-c4e2-4f08-831a-aa8fb9d4778c\") " Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.179307 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/91518a76-c4e2-4f08-831a-aa8fb9d4778c-logs" (OuterVolumeSpecName: "logs") pod "91518a76-c4e2-4f08-831a-aa8fb9d4778c" (UID: "91518a76-c4e2-4f08-831a-aa8fb9d4778c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.186739 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91518a76-c4e2-4f08-831a-aa8fb9d4778c-scripts" (OuterVolumeSpecName: "scripts") pod "91518a76-c4e2-4f08-831a-aa8fb9d4778c" (UID: "91518a76-c4e2-4f08-831a-aa8fb9d4778c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.216856 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/91518a76-c4e2-4f08-831a-aa8fb9d4778c-kube-api-access-gj5sj" (OuterVolumeSpecName: "kube-api-access-gj5sj") pod "91518a76-c4e2-4f08-831a-aa8fb9d4778c" (UID: "91518a76-c4e2-4f08-831a-aa8fb9d4778c"). InnerVolumeSpecName "kube-api-access-gj5sj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.233970 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91518a76-c4e2-4f08-831a-aa8fb9d4778c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "91518a76-c4e2-4f08-831a-aa8fb9d4778c" (UID: "91518a76-c4e2-4f08-831a-aa8fb9d4778c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.266933 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91518a76-c4e2-4f08-831a-aa8fb9d4778c-config-data" (OuterVolumeSpecName: "config-data") pod "91518a76-c4e2-4f08-831a-aa8fb9d4778c" (UID: "91518a76-c4e2-4f08-831a-aa8fb9d4778c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.310964 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91518a76-c4e2-4f08-831a-aa8fb9d4778c-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.311006 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gj5sj\" (UniqueName: \"kubernetes.io/projected/91518a76-c4e2-4f08-831a-aa8fb9d4778c-kube-api-access-gj5sj\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.311020 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91518a76-c4e2-4f08-831a-aa8fb9d4778c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.311031 4701 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/91518a76-c4e2-4f08-831a-aa8fb9d4778c-logs\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.311043 4701 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/91518a76-c4e2-4f08-831a-aa8fb9d4778c-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.416060 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-api-0"] Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.416307 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-api-0" podUID="bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a" containerName="watcher-api-log" containerID="cri-o://7904f05470d37b18128384028bcbebca83b236367b317de53ac8c691079ab4e9" gracePeriod=30 Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.416934 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-api-0" podUID="bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a" containerName="watcher-api" containerID="cri-o://4cb497b0052b985efea73a13783d29d28a1d0219bec6d49601b0ffb5c93a0efe" gracePeriod=30 Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.508427 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.623655 4701 generic.go:334] "Generic (PLEG): container finished" podID="bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a" containerID="7904f05470d37b18128384028bcbebca83b236367b317de53ac8c691079ab4e9" exitCode=143 Nov 21 19:20:11 crc kubenswrapper[4701]: 
I1121 19:20:11.623822 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a","Type":"ContainerDied","Data":"7904f05470d37b18128384028bcbebca83b236367b317de53ac8c691079ab4e9"} Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.631143 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-kfsds" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.631357 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-kfsds" event={"ID":"91518a76-c4e2-4f08-831a-aa8fb9d4778c","Type":"ContainerDied","Data":"023813cb056ac2bc047969482284e5c13e94511b623a4e3dd83d98be95f4b5e9"} Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.631464 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="023813cb056ac2bc047969482284e5c13e94511b623a4e3dd83d98be95f4b5e9" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.646086 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-78b94b5b48-685pj"] Nov 21 19:20:11 crc kubenswrapper[4701]: E1121 19:20:11.646776 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91518a76-c4e2-4f08-831a-aa8fb9d4778c" containerName="placement-db-sync" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.646879 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="91518a76-c4e2-4f08-831a-aa8fb9d4778c" containerName="placement-db-sync" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.647142 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="91518a76-c4e2-4f08-831a-aa8fb9d4778c" containerName="placement-db-sync" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.648966 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-78b94b5b48-685pj" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.660994 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.661957 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.662005 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-hpsk9" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.662454 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.663359 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.673753 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-78b94b5b48-685pj"] Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.721186 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5db43f2-147c-4625-9f3a-9f68cc6afa8c-public-tls-certs\") pod \"placement-78b94b5b48-685pj\" (UID: \"c5db43f2-147c-4625-9f3a-9f68cc6afa8c\") " pod="openstack/placement-78b94b5b48-685pj" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.721278 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5db43f2-147c-4625-9f3a-9f68cc6afa8c-scripts\") pod \"placement-78b94b5b48-685pj\" (UID: \"c5db43f2-147c-4625-9f3a-9f68cc6afa8c\") " pod="openstack/placement-78b94b5b48-685pj" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.721336 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5db43f2-147c-4625-9f3a-9f68cc6afa8c-combined-ca-bundle\") pod \"placement-78b94b5b48-685pj\" (UID: \"c5db43f2-147c-4625-9f3a-9f68cc6afa8c\") " pod="openstack/placement-78b94b5b48-685pj" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.721370 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c5db43f2-147c-4625-9f3a-9f68cc6afa8c-logs\") pod \"placement-78b94b5b48-685pj\" (UID: \"c5db43f2-147c-4625-9f3a-9f68cc6afa8c\") " pod="openstack/placement-78b94b5b48-685pj" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.721395 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5db43f2-147c-4625-9f3a-9f68cc6afa8c-config-data\") pod \"placement-78b94b5b48-685pj\" (UID: \"c5db43f2-147c-4625-9f3a-9f68cc6afa8c\") " pod="openstack/placement-78b94b5b48-685pj" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.721447 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-btngx\" (UniqueName: \"kubernetes.io/projected/c5db43f2-147c-4625-9f3a-9f68cc6afa8c-kube-api-access-btngx\") pod \"placement-78b94b5b48-685pj\" (UID: \"c5db43f2-147c-4625-9f3a-9f68cc6afa8c\") " pod="openstack/placement-78b94b5b48-685pj" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.721466 4701 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5db43f2-147c-4625-9f3a-9f68cc6afa8c-internal-tls-certs\") pod \"placement-78b94b5b48-685pj\" (UID: \"c5db43f2-147c-4625-9f3a-9f68cc6afa8c\") " pod="openstack/placement-78b94b5b48-685pj" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.765304 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-65d76b5c54-c9d89"] Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.823287 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5db43f2-147c-4625-9f3a-9f68cc6afa8c-scripts\") pod \"placement-78b94b5b48-685pj\" (UID: \"c5db43f2-147c-4625-9f3a-9f68cc6afa8c\") " pod="openstack/placement-78b94b5b48-685pj" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.823482 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5db43f2-147c-4625-9f3a-9f68cc6afa8c-combined-ca-bundle\") pod \"placement-78b94b5b48-685pj\" (UID: \"c5db43f2-147c-4625-9f3a-9f68cc6afa8c\") " pod="openstack/placement-78b94b5b48-685pj" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.823522 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c5db43f2-147c-4625-9f3a-9f68cc6afa8c-logs\") pod \"placement-78b94b5b48-685pj\" (UID: \"c5db43f2-147c-4625-9f3a-9f68cc6afa8c\") " pod="openstack/placement-78b94b5b48-685pj" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.823546 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5db43f2-147c-4625-9f3a-9f68cc6afa8c-config-data\") pod \"placement-78b94b5b48-685pj\" (UID: \"c5db43f2-147c-4625-9f3a-9f68cc6afa8c\") " pod="openstack/placement-78b94b5b48-685pj" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.823593 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-btngx\" (UniqueName: \"kubernetes.io/projected/c5db43f2-147c-4625-9f3a-9f68cc6afa8c-kube-api-access-btngx\") pod \"placement-78b94b5b48-685pj\" (UID: \"c5db43f2-147c-4625-9f3a-9f68cc6afa8c\") " pod="openstack/placement-78b94b5b48-685pj" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.823610 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5db43f2-147c-4625-9f3a-9f68cc6afa8c-internal-tls-certs\") pod \"placement-78b94b5b48-685pj\" (UID: \"c5db43f2-147c-4625-9f3a-9f68cc6afa8c\") " pod="openstack/placement-78b94b5b48-685pj" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.823671 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5db43f2-147c-4625-9f3a-9f68cc6afa8c-public-tls-certs\") pod \"placement-78b94b5b48-685pj\" (UID: \"c5db43f2-147c-4625-9f3a-9f68cc6afa8c\") " pod="openstack/placement-78b94b5b48-685pj" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.824517 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c5db43f2-147c-4625-9f3a-9f68cc6afa8c-logs\") pod \"placement-78b94b5b48-685pj\" (UID: \"c5db43f2-147c-4625-9f3a-9f68cc6afa8c\") " pod="openstack/placement-78b94b5b48-685pj" Nov 21 19:20:11 crc 
kubenswrapper[4701]: I1121 19:20:11.826882 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5db43f2-147c-4625-9f3a-9f68cc6afa8c-combined-ca-bundle\") pod \"placement-78b94b5b48-685pj\" (UID: \"c5db43f2-147c-4625-9f3a-9f68cc6afa8c\") " pod="openstack/placement-78b94b5b48-685pj" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.827838 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5db43f2-147c-4625-9f3a-9f68cc6afa8c-scripts\") pod \"placement-78b94b5b48-685pj\" (UID: \"c5db43f2-147c-4625-9f3a-9f68cc6afa8c\") " pod="openstack/placement-78b94b5b48-685pj" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.828247 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5db43f2-147c-4625-9f3a-9f68cc6afa8c-config-data\") pod \"placement-78b94b5b48-685pj\" (UID: \"c5db43f2-147c-4625-9f3a-9f68cc6afa8c\") " pod="openstack/placement-78b94b5b48-685pj" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.829843 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5db43f2-147c-4625-9f3a-9f68cc6afa8c-internal-tls-certs\") pod \"placement-78b94b5b48-685pj\" (UID: \"c5db43f2-147c-4625-9f3a-9f68cc6afa8c\") " pod="openstack/placement-78b94b5b48-685pj" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.832827 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5db43f2-147c-4625-9f3a-9f68cc6afa8c-public-tls-certs\") pod \"placement-78b94b5b48-685pj\" (UID: \"c5db43f2-147c-4625-9f3a-9f68cc6afa8c\") " pod="openstack/placement-78b94b5b48-685pj" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.848578 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-btngx\" (UniqueName: \"kubernetes.io/projected/c5db43f2-147c-4625-9f3a-9f68cc6afa8c-kube-api-access-btngx\") pod \"placement-78b94b5b48-685pj\" (UID: \"c5db43f2-147c-4625-9f3a-9f68cc6afa8c\") " pod="openstack/placement-78b94b5b48-685pj" Nov 21 19:20:11 crc kubenswrapper[4701]: I1121 19:20:11.990412 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-78b94b5b48-685pj" Nov 21 19:20:12 crc kubenswrapper[4701]: I1121 19:20:12.291009 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-rp9lc" Nov 21 19:20:12 crc kubenswrapper[4701]: I1121 19:20:12.337913 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a93232c-afb8-4ff5-8775-5c3574997149-combined-ca-bundle\") pod \"0a93232c-afb8-4ff5-8775-5c3574997149\" (UID: \"0a93232c-afb8-4ff5-8775-5c3574997149\") " Nov 21 19:20:12 crc kubenswrapper[4701]: I1121 19:20:12.338742 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0a93232c-afb8-4ff5-8775-5c3574997149-db-sync-config-data\") pod \"0a93232c-afb8-4ff5-8775-5c3574997149\" (UID: \"0a93232c-afb8-4ff5-8775-5c3574997149\") " Nov 21 19:20:12 crc kubenswrapper[4701]: I1121 19:20:12.338866 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hkth4\" (UniqueName: \"kubernetes.io/projected/0a93232c-afb8-4ff5-8775-5c3574997149-kube-api-access-hkth4\") pod \"0a93232c-afb8-4ff5-8775-5c3574997149\" (UID: \"0a93232c-afb8-4ff5-8775-5c3574997149\") " Nov 21 19:20:12 crc kubenswrapper[4701]: I1121 19:20:12.389055 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a93232c-afb8-4ff5-8775-5c3574997149-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "0a93232c-afb8-4ff5-8775-5c3574997149" (UID: "0a93232c-afb8-4ff5-8775-5c3574997149"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:12 crc kubenswrapper[4701]: I1121 19:20:12.390819 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a93232c-afb8-4ff5-8775-5c3574997149-kube-api-access-hkth4" (OuterVolumeSpecName: "kube-api-access-hkth4") pod "0a93232c-afb8-4ff5-8775-5c3574997149" (UID: "0a93232c-afb8-4ff5-8775-5c3574997149"). InnerVolumeSpecName "kube-api-access-hkth4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:20:12 crc kubenswrapper[4701]: I1121 19:20:12.396948 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a93232c-afb8-4ff5-8775-5c3574997149-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0a93232c-afb8-4ff5-8775-5c3574997149" (UID: "0a93232c-afb8-4ff5-8775-5c3574997149"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:12 crc kubenswrapper[4701]: I1121 19:20:12.442688 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a93232c-afb8-4ff5-8775-5c3574997149-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:12 crc kubenswrapper[4701]: I1121 19:20:12.442725 4701 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0a93232c-afb8-4ff5-8775-5c3574997149-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:12 crc kubenswrapper[4701]: I1121 19:20:12.442738 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hkth4\" (UniqueName: \"kubernetes.io/projected/0a93232c-afb8-4ff5-8775-5c3574997149-kube-api-access-hkth4\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:12 crc kubenswrapper[4701]: I1121 19:20:12.662134 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-rp9lc" Nov 21 19:20:12 crc kubenswrapper[4701]: I1121 19:20:12.662430 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-rp9lc" event={"ID":"0a93232c-afb8-4ff5-8775-5c3574997149","Type":"ContainerDied","Data":"760f4582ce73e1baa1ec174ac291649776c24f5d38a5dfc38e15a00e94f24fa3"} Nov 21 19:20:12 crc kubenswrapper[4701]: I1121 19:20:12.662484 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="760f4582ce73e1baa1ec174ac291649776c24f5d38a5dfc38e15a00e94f24fa3" Nov 21 19:20:12 crc kubenswrapper[4701]: I1121 19:20:12.673891 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-65d76b5c54-c9d89" event={"ID":"dee81498-90dd-46c0-949f-c3de3b9bfbd3","Type":"ContainerStarted","Data":"994fab48f1ec5d082dbac6dbfa6a2386cd261fec2d83e1e61647d5a39ddff5de"} Nov 21 19:20:12 crc kubenswrapper[4701]: I1121 19:20:12.673963 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-65d76b5c54-c9d89" event={"ID":"dee81498-90dd-46c0-949f-c3de3b9bfbd3","Type":"ContainerStarted","Data":"03eb880b7c54c52d19c241969c19670e317bdde7216d9078b4531d4658918981"} Nov 21 19:20:12 crc kubenswrapper[4701]: I1121 19:20:12.674474 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-65d76b5c54-c9d89" Nov 21 19:20:12 crc kubenswrapper[4701]: I1121 19:20:12.728749 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-65d76b5c54-c9d89" podStartSLOduration=2.728712861 podStartE2EDuration="2.728712861s" podCreationTimestamp="2025-11-21 19:20:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:20:12.716129013 +0000 UTC m=+1103.501269040" watchObservedRunningTime="2025-11-21 19:20:12.728712861 +0000 UTC m=+1103.513852888" Nov 21 19:20:12 crc kubenswrapper[4701]: I1121 19:20:12.974521 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-58799d9dcd-lkd7s"] Nov 21 19:20:12 crc kubenswrapper[4701]: E1121 19:20:12.976965 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a93232c-afb8-4ff5-8775-5c3574997149" containerName="barbican-db-sync" Nov 21 19:20:12 crc kubenswrapper[4701]: I1121 19:20:12.976984 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a93232c-afb8-4ff5-8775-5c3574997149" containerName="barbican-db-sync" Nov 21 19:20:12 crc kubenswrapper[4701]: I1121 19:20:12.979088 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a93232c-afb8-4ff5-8775-5c3574997149" containerName="barbican-db-sync" Nov 21 19:20:12 crc kubenswrapper[4701]: I1121 19:20:12.995734 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-58799d9dcd-lkd7s" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.001312 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-6ff5fffc67-6nrzn"] Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.003758 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-6ff5fffc67-6nrzn" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.012620 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.012947 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.013338 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-g45kf" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.013851 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.015623 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-6ff5fffc67-6nrzn"] Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.032759 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-58799d9dcd-lkd7s"] Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.046478 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-78b94b5b48-685pj"] Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.055096 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8678d9cc8c-7frlc"] Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.057366 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8678d9cc8c-7frlc" Nov 21 19:20:13 crc kubenswrapper[4701]: W1121 19:20:13.070544 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc5db43f2_147c_4625_9f3a_9f68cc6afa8c.slice/crio-d4967d06fd4972c069c7ac1af6946503b60be61664e23e563b22ef0d06142e1d WatchSource:0}: Error finding container d4967d06fd4972c069c7ac1af6946503b60be61664e23e563b22ef0d06142e1d: Status 404 returned error can't find the container with id d4967d06fd4972c069c7ac1af6946503b60be61664e23e563b22ef0d06142e1d Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.113817 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8678d9cc8c-7frlc"] Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.142070 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8d0e0c3-70bf-4ce0-94ea-54f03b3c42e3-combined-ca-bundle\") pod \"barbican-keystone-listener-58799d9dcd-lkd7s\" (UID: \"c8d0e0c3-70bf-4ce0-94ea-54f03b3c42e3\") " pod="openstack/barbican-keystone-listener-58799d9dcd-lkd7s" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.142153 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zgnv6\" (UniqueName: \"kubernetes.io/projected/a20c7ac2-0856-4c64-8910-3c053184c47b-kube-api-access-zgnv6\") pod \"barbican-worker-6ff5fffc67-6nrzn\" (UID: \"a20c7ac2-0856-4c64-8910-3c053184c47b\") " pod="openstack/barbican-worker-6ff5fffc67-6nrzn" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.142270 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8d0e0c3-70bf-4ce0-94ea-54f03b3c42e3-config-data\") pod \"barbican-keystone-listener-58799d9dcd-lkd7s\" (UID: 
\"c8d0e0c3-70bf-4ce0-94ea-54f03b3c42e3\") " pod="openstack/barbican-keystone-listener-58799d9dcd-lkd7s" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.142300 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vxtq\" (UniqueName: \"kubernetes.io/projected/2fecc4b4-2cc3-4e48-9db1-93b05843825b-kube-api-access-4vxtq\") pod \"dnsmasq-dns-8678d9cc8c-7frlc\" (UID: \"2fecc4b4-2cc3-4e48-9db1-93b05843825b\") " pod="openstack/dnsmasq-dns-8678d9cc8c-7frlc" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.142404 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a20c7ac2-0856-4c64-8910-3c053184c47b-combined-ca-bundle\") pod \"barbican-worker-6ff5fffc67-6nrzn\" (UID: \"a20c7ac2-0856-4c64-8910-3c053184c47b\") " pod="openstack/barbican-worker-6ff5fffc67-6nrzn" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.142435 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ls4s7\" (UniqueName: \"kubernetes.io/projected/c8d0e0c3-70bf-4ce0-94ea-54f03b3c42e3-kube-api-access-ls4s7\") pod \"barbican-keystone-listener-58799d9dcd-lkd7s\" (UID: \"c8d0e0c3-70bf-4ce0-94ea-54f03b3c42e3\") " pod="openstack/barbican-keystone-listener-58799d9dcd-lkd7s" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.142649 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a20c7ac2-0856-4c64-8910-3c053184c47b-config-data\") pod \"barbican-worker-6ff5fffc67-6nrzn\" (UID: \"a20c7ac2-0856-4c64-8910-3c053184c47b\") " pod="openstack/barbican-worker-6ff5fffc67-6nrzn" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.142695 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2fecc4b4-2cc3-4e48-9db1-93b05843825b-dns-swift-storage-0\") pod \"dnsmasq-dns-8678d9cc8c-7frlc\" (UID: \"2fecc4b4-2cc3-4e48-9db1-93b05843825b\") " pod="openstack/dnsmasq-dns-8678d9cc8c-7frlc" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.142755 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2fecc4b4-2cc3-4e48-9db1-93b05843825b-ovsdbserver-sb\") pod \"dnsmasq-dns-8678d9cc8c-7frlc\" (UID: \"2fecc4b4-2cc3-4e48-9db1-93b05843825b\") " pod="openstack/dnsmasq-dns-8678d9cc8c-7frlc" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.142801 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c8d0e0c3-70bf-4ce0-94ea-54f03b3c42e3-logs\") pod \"barbican-keystone-listener-58799d9dcd-lkd7s\" (UID: \"c8d0e0c3-70bf-4ce0-94ea-54f03b3c42e3\") " pod="openstack/barbican-keystone-listener-58799d9dcd-lkd7s" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.142925 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a20c7ac2-0856-4c64-8910-3c053184c47b-logs\") pod \"barbican-worker-6ff5fffc67-6nrzn\" (UID: \"a20c7ac2-0856-4c64-8910-3c053184c47b\") " pod="openstack/barbican-worker-6ff5fffc67-6nrzn" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.142945 4701 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c8d0e0c3-70bf-4ce0-94ea-54f03b3c42e3-config-data-custom\") pod \"barbican-keystone-listener-58799d9dcd-lkd7s\" (UID: \"c8d0e0c3-70bf-4ce0-94ea-54f03b3c42e3\") " pod="openstack/barbican-keystone-listener-58799d9dcd-lkd7s" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.142975 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2fecc4b4-2cc3-4e48-9db1-93b05843825b-ovsdbserver-nb\") pod \"dnsmasq-dns-8678d9cc8c-7frlc\" (UID: \"2fecc4b4-2cc3-4e48-9db1-93b05843825b\") " pod="openstack/dnsmasq-dns-8678d9cc8c-7frlc" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.143008 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a20c7ac2-0856-4c64-8910-3c053184c47b-config-data-custom\") pod \"barbican-worker-6ff5fffc67-6nrzn\" (UID: \"a20c7ac2-0856-4c64-8910-3c053184c47b\") " pod="openstack/barbican-worker-6ff5fffc67-6nrzn" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.143073 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2fecc4b4-2cc3-4e48-9db1-93b05843825b-config\") pod \"dnsmasq-dns-8678d9cc8c-7frlc\" (UID: \"2fecc4b4-2cc3-4e48-9db1-93b05843825b\") " pod="openstack/dnsmasq-dns-8678d9cc8c-7frlc" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.143103 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2fecc4b4-2cc3-4e48-9db1-93b05843825b-dns-svc\") pod \"dnsmasq-dns-8678d9cc8c-7frlc\" (UID: \"2fecc4b4-2cc3-4e48-9db1-93b05843825b\") " pod="openstack/dnsmasq-dns-8678d9cc8c-7frlc" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.227319 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-5c5f685fb-t5wpk"] Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.238532 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5c5f685fb-t5wpk" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.243589 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.245605 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8d0e0c3-70bf-4ce0-94ea-54f03b3c42e3-config-data\") pod \"barbican-keystone-listener-58799d9dcd-lkd7s\" (UID: \"c8d0e0c3-70bf-4ce0-94ea-54f03b3c42e3\") " pod="openstack/barbican-keystone-listener-58799d9dcd-lkd7s" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.245648 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vxtq\" (UniqueName: \"kubernetes.io/projected/2fecc4b4-2cc3-4e48-9db1-93b05843825b-kube-api-access-4vxtq\") pod \"dnsmasq-dns-8678d9cc8c-7frlc\" (UID: \"2fecc4b4-2cc3-4e48-9db1-93b05843825b\") " pod="openstack/dnsmasq-dns-8678d9cc8c-7frlc" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.245830 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a20c7ac2-0856-4c64-8910-3c053184c47b-combined-ca-bundle\") pod \"barbican-worker-6ff5fffc67-6nrzn\" (UID: \"a20c7ac2-0856-4c64-8910-3c053184c47b\") " pod="openstack/barbican-worker-6ff5fffc67-6nrzn" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.245865 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ls4s7\" (UniqueName: \"kubernetes.io/projected/c8d0e0c3-70bf-4ce0-94ea-54f03b3c42e3-kube-api-access-ls4s7\") pod \"barbican-keystone-listener-58799d9dcd-lkd7s\" (UID: \"c8d0e0c3-70bf-4ce0-94ea-54f03b3c42e3\") " pod="openstack/barbican-keystone-listener-58799d9dcd-lkd7s" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.245935 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a20c7ac2-0856-4c64-8910-3c053184c47b-config-data\") pod \"barbican-worker-6ff5fffc67-6nrzn\" (UID: \"a20c7ac2-0856-4c64-8910-3c053184c47b\") " pod="openstack/barbican-worker-6ff5fffc67-6nrzn" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.245957 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2fecc4b4-2cc3-4e48-9db1-93b05843825b-dns-swift-storage-0\") pod \"dnsmasq-dns-8678d9cc8c-7frlc\" (UID: \"2fecc4b4-2cc3-4e48-9db1-93b05843825b\") " pod="openstack/dnsmasq-dns-8678d9cc8c-7frlc" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.245986 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2fecc4b4-2cc3-4e48-9db1-93b05843825b-ovsdbserver-sb\") pod \"dnsmasq-dns-8678d9cc8c-7frlc\" (UID: \"2fecc4b4-2cc3-4e48-9db1-93b05843825b\") " pod="openstack/dnsmasq-dns-8678d9cc8c-7frlc" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.246012 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c8d0e0c3-70bf-4ce0-94ea-54f03b3c42e3-logs\") pod \"barbican-keystone-listener-58799d9dcd-lkd7s\" (UID: \"c8d0e0c3-70bf-4ce0-94ea-54f03b3c42e3\") " pod="openstack/barbican-keystone-listener-58799d9dcd-lkd7s" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.246248 4701 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a20c7ac2-0856-4c64-8910-3c053184c47b-logs\") pod \"barbican-worker-6ff5fffc67-6nrzn\" (UID: \"a20c7ac2-0856-4c64-8910-3c053184c47b\") " pod="openstack/barbican-worker-6ff5fffc67-6nrzn" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.246270 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c8d0e0c3-70bf-4ce0-94ea-54f03b3c42e3-config-data-custom\") pod \"barbican-keystone-listener-58799d9dcd-lkd7s\" (UID: \"c8d0e0c3-70bf-4ce0-94ea-54f03b3c42e3\") " pod="openstack/barbican-keystone-listener-58799d9dcd-lkd7s" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.246289 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2fecc4b4-2cc3-4e48-9db1-93b05843825b-ovsdbserver-nb\") pod \"dnsmasq-dns-8678d9cc8c-7frlc\" (UID: \"2fecc4b4-2cc3-4e48-9db1-93b05843825b\") " pod="openstack/dnsmasq-dns-8678d9cc8c-7frlc" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.246309 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a20c7ac2-0856-4c64-8910-3c053184c47b-config-data-custom\") pod \"barbican-worker-6ff5fffc67-6nrzn\" (UID: \"a20c7ac2-0856-4c64-8910-3c053184c47b\") " pod="openstack/barbican-worker-6ff5fffc67-6nrzn" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.246338 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2fecc4b4-2cc3-4e48-9db1-93b05843825b-config\") pod \"dnsmasq-dns-8678d9cc8c-7frlc\" (UID: \"2fecc4b4-2cc3-4e48-9db1-93b05843825b\") " pod="openstack/dnsmasq-dns-8678d9cc8c-7frlc" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.246359 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2fecc4b4-2cc3-4e48-9db1-93b05843825b-dns-svc\") pod \"dnsmasq-dns-8678d9cc8c-7frlc\" (UID: \"2fecc4b4-2cc3-4e48-9db1-93b05843825b\") " pod="openstack/dnsmasq-dns-8678d9cc8c-7frlc" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.246379 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8d0e0c3-70bf-4ce0-94ea-54f03b3c42e3-combined-ca-bundle\") pod \"barbican-keystone-listener-58799d9dcd-lkd7s\" (UID: \"c8d0e0c3-70bf-4ce0-94ea-54f03b3c42e3\") " pod="openstack/barbican-keystone-listener-58799d9dcd-lkd7s" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.246396 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zgnv6\" (UniqueName: \"kubernetes.io/projected/a20c7ac2-0856-4c64-8910-3c053184c47b-kube-api-access-zgnv6\") pod \"barbican-worker-6ff5fffc67-6nrzn\" (UID: \"a20c7ac2-0856-4c64-8910-3c053184c47b\") " pod="openstack/barbican-worker-6ff5fffc67-6nrzn" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.247616 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a20c7ac2-0856-4c64-8910-3c053184c47b-logs\") pod \"barbican-worker-6ff5fffc67-6nrzn\" (UID: \"a20c7ac2-0856-4c64-8910-3c053184c47b\") " pod="openstack/barbican-worker-6ff5fffc67-6nrzn" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.250637 4701 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2fecc4b4-2cc3-4e48-9db1-93b05843825b-ovsdbserver-sb\") pod \"dnsmasq-dns-8678d9cc8c-7frlc\" (UID: \"2fecc4b4-2cc3-4e48-9db1-93b05843825b\") " pod="openstack/dnsmasq-dns-8678d9cc8c-7frlc" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.251282 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2fecc4b4-2cc3-4e48-9db1-93b05843825b-dns-swift-storage-0\") pod \"dnsmasq-dns-8678d9cc8c-7frlc\" (UID: \"2fecc4b4-2cc3-4e48-9db1-93b05843825b\") " pod="openstack/dnsmasq-dns-8678d9cc8c-7frlc" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.253503 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2fecc4b4-2cc3-4e48-9db1-93b05843825b-config\") pod \"dnsmasq-dns-8678d9cc8c-7frlc\" (UID: \"2fecc4b4-2cc3-4e48-9db1-93b05843825b\") " pod="openstack/dnsmasq-dns-8678d9cc8c-7frlc" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.254278 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2fecc4b4-2cc3-4e48-9db1-93b05843825b-ovsdbserver-nb\") pod \"dnsmasq-dns-8678d9cc8c-7frlc\" (UID: \"2fecc4b4-2cc3-4e48-9db1-93b05843825b\") " pod="openstack/dnsmasq-dns-8678d9cc8c-7frlc" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.260312 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2fecc4b4-2cc3-4e48-9db1-93b05843825b-dns-svc\") pod \"dnsmasq-dns-8678d9cc8c-7frlc\" (UID: \"2fecc4b4-2cc3-4e48-9db1-93b05843825b\") " pod="openstack/dnsmasq-dns-8678d9cc8c-7frlc" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.267065 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a20c7ac2-0856-4c64-8910-3c053184c47b-config-data-custom\") pod \"barbican-worker-6ff5fffc67-6nrzn\" (UID: \"a20c7ac2-0856-4c64-8910-3c053184c47b\") " pod="openstack/barbican-worker-6ff5fffc67-6nrzn" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.268941 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c8d0e0c3-70bf-4ce0-94ea-54f03b3c42e3-logs\") pod \"barbican-keystone-listener-58799d9dcd-lkd7s\" (UID: \"c8d0e0c3-70bf-4ce0-94ea-54f03b3c42e3\") " pod="openstack/barbican-keystone-listener-58799d9dcd-lkd7s" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.269629 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a20c7ac2-0856-4c64-8910-3c053184c47b-combined-ca-bundle\") pod \"barbican-worker-6ff5fffc67-6nrzn\" (UID: \"a20c7ac2-0856-4c64-8910-3c053184c47b\") " pod="openstack/barbican-worker-6ff5fffc67-6nrzn" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.272218 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c8d0e0c3-70bf-4ce0-94ea-54f03b3c42e3-config-data-custom\") pod \"barbican-keystone-listener-58799d9dcd-lkd7s\" (UID: \"c8d0e0c3-70bf-4ce0-94ea-54f03b3c42e3\") " pod="openstack/barbican-keystone-listener-58799d9dcd-lkd7s" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.272387 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/c8d0e0c3-70bf-4ce0-94ea-54f03b3c42e3-config-data\") pod \"barbican-keystone-listener-58799d9dcd-lkd7s\" (UID: \"c8d0e0c3-70bf-4ce0-94ea-54f03b3c42e3\") " pod="openstack/barbican-keystone-listener-58799d9dcd-lkd7s" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.272676 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8d0e0c3-70bf-4ce0-94ea-54f03b3c42e3-combined-ca-bundle\") pod \"barbican-keystone-listener-58799d9dcd-lkd7s\" (UID: \"c8d0e0c3-70bf-4ce0-94ea-54f03b3c42e3\") " pod="openstack/barbican-keystone-listener-58799d9dcd-lkd7s" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.280995 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ls4s7\" (UniqueName: \"kubernetes.io/projected/c8d0e0c3-70bf-4ce0-94ea-54f03b3c42e3-kube-api-access-ls4s7\") pod \"barbican-keystone-listener-58799d9dcd-lkd7s\" (UID: \"c8d0e0c3-70bf-4ce0-94ea-54f03b3c42e3\") " pod="openstack/barbican-keystone-listener-58799d9dcd-lkd7s" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.288720 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zgnv6\" (UniqueName: \"kubernetes.io/projected/a20c7ac2-0856-4c64-8910-3c053184c47b-kube-api-access-zgnv6\") pod \"barbican-worker-6ff5fffc67-6nrzn\" (UID: \"a20c7ac2-0856-4c64-8910-3c053184c47b\") " pod="openstack/barbican-worker-6ff5fffc67-6nrzn" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.294844 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4vxtq\" (UniqueName: \"kubernetes.io/projected/2fecc4b4-2cc3-4e48-9db1-93b05843825b-kube-api-access-4vxtq\") pod \"dnsmasq-dns-8678d9cc8c-7frlc\" (UID: \"2fecc4b4-2cc3-4e48-9db1-93b05843825b\") " pod="openstack/dnsmasq-dns-8678d9cc8c-7frlc" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.297062 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a20c7ac2-0856-4c64-8910-3c053184c47b-config-data\") pod \"barbican-worker-6ff5fffc67-6nrzn\" (UID: \"a20c7ac2-0856-4c64-8910-3c053184c47b\") " pod="openstack/barbican-worker-6ff5fffc67-6nrzn" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.300155 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5c5f685fb-t5wpk"] Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.313782 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-6ff5fffc67-6nrzn" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.332464 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8678d9cc8c-7frlc" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.347934 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f658fc90-2c53-4cdd-b411-16ccb58f7625-config-data-custom\") pod \"barbican-api-5c5f685fb-t5wpk\" (UID: \"f658fc90-2c53-4cdd-b411-16ccb58f7625\") " pod="openstack/barbican-api-5c5f685fb-t5wpk" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.347976 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f658fc90-2c53-4cdd-b411-16ccb58f7625-logs\") pod \"barbican-api-5c5f685fb-t5wpk\" (UID: \"f658fc90-2c53-4cdd-b411-16ccb58f7625\") " pod="openstack/barbican-api-5c5f685fb-t5wpk" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.348027 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f658fc90-2c53-4cdd-b411-16ccb58f7625-config-data\") pod \"barbican-api-5c5f685fb-t5wpk\" (UID: \"f658fc90-2c53-4cdd-b411-16ccb58f7625\") " pod="openstack/barbican-api-5c5f685fb-t5wpk" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.348166 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-56djv\" (UniqueName: \"kubernetes.io/projected/f658fc90-2c53-4cdd-b411-16ccb58f7625-kube-api-access-56djv\") pod \"barbican-api-5c5f685fb-t5wpk\" (UID: \"f658fc90-2c53-4cdd-b411-16ccb58f7625\") " pod="openstack/barbican-api-5c5f685fb-t5wpk" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.348287 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f658fc90-2c53-4cdd-b411-16ccb58f7625-combined-ca-bundle\") pod \"barbican-api-5c5f685fb-t5wpk\" (UID: \"f658fc90-2c53-4cdd-b411-16ccb58f7625\") " pod="openstack/barbican-api-5c5f685fb-t5wpk" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.419297 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-58799d9dcd-lkd7s" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.450930 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f658fc90-2c53-4cdd-b411-16ccb58f7625-config-data-custom\") pod \"barbican-api-5c5f685fb-t5wpk\" (UID: \"f658fc90-2c53-4cdd-b411-16ccb58f7625\") " pod="openstack/barbican-api-5c5f685fb-t5wpk" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.450969 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f658fc90-2c53-4cdd-b411-16ccb58f7625-logs\") pod \"barbican-api-5c5f685fb-t5wpk\" (UID: \"f658fc90-2c53-4cdd-b411-16ccb58f7625\") " pod="openstack/barbican-api-5c5f685fb-t5wpk" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.451052 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f658fc90-2c53-4cdd-b411-16ccb58f7625-config-data\") pod \"barbican-api-5c5f685fb-t5wpk\" (UID: \"f658fc90-2c53-4cdd-b411-16ccb58f7625\") " pod="openstack/barbican-api-5c5f685fb-t5wpk" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.451395 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-56djv\" (UniqueName: \"kubernetes.io/projected/f658fc90-2c53-4cdd-b411-16ccb58f7625-kube-api-access-56djv\") pod \"barbican-api-5c5f685fb-t5wpk\" (UID: \"f658fc90-2c53-4cdd-b411-16ccb58f7625\") " pod="openstack/barbican-api-5c5f685fb-t5wpk" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.451432 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f658fc90-2c53-4cdd-b411-16ccb58f7625-combined-ca-bundle\") pod \"barbican-api-5c5f685fb-t5wpk\" (UID: \"f658fc90-2c53-4cdd-b411-16ccb58f7625\") " pod="openstack/barbican-api-5c5f685fb-t5wpk" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.451800 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f658fc90-2c53-4cdd-b411-16ccb58f7625-logs\") pod \"barbican-api-5c5f685fb-t5wpk\" (UID: \"f658fc90-2c53-4cdd-b411-16ccb58f7625\") " pod="openstack/barbican-api-5c5f685fb-t5wpk" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.457778 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f658fc90-2c53-4cdd-b411-16ccb58f7625-combined-ca-bundle\") pod \"barbican-api-5c5f685fb-t5wpk\" (UID: \"f658fc90-2c53-4cdd-b411-16ccb58f7625\") " pod="openstack/barbican-api-5c5f685fb-t5wpk" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.458413 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f658fc90-2c53-4cdd-b411-16ccb58f7625-config-data-custom\") pod \"barbican-api-5c5f685fb-t5wpk\" (UID: \"f658fc90-2c53-4cdd-b411-16ccb58f7625\") " pod="openstack/barbican-api-5c5f685fb-t5wpk" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.461992 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f658fc90-2c53-4cdd-b411-16ccb58f7625-config-data\") pod \"barbican-api-5c5f685fb-t5wpk\" (UID: \"f658fc90-2c53-4cdd-b411-16ccb58f7625\") " pod="openstack/barbican-api-5c5f685fb-t5wpk" Nov 21 19:20:13 crc 
kubenswrapper[4701]: I1121 19:20:13.469482 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-56djv\" (UniqueName: \"kubernetes.io/projected/f658fc90-2c53-4cdd-b411-16ccb58f7625-kube-api-access-56djv\") pod \"barbican-api-5c5f685fb-t5wpk\" (UID: \"f658fc90-2c53-4cdd-b411-16ccb58f7625\") " pod="openstack/barbican-api-5c5f685fb-t5wpk" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.637285 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5c5f685fb-t5wpk" Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.707402 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-78b94b5b48-685pj" event={"ID":"c5db43f2-147c-4625-9f3a-9f68cc6afa8c","Type":"ContainerStarted","Data":"86bd9a4cf93d728491f91b2227387c0090325a8406d0a523a88e93847070b92e"} Nov 21 19:20:13 crc kubenswrapper[4701]: I1121 19:20:13.707443 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-78b94b5b48-685pj" event={"ID":"c5db43f2-147c-4625-9f3a-9f68cc6afa8c","Type":"ContainerStarted","Data":"d4967d06fd4972c069c7ac1af6946503b60be61664e23e563b22ef0d06142e1d"} Nov 21 19:20:14 crc kubenswrapper[4701]: I1121 19:20:14.172534 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8678d9cc8c-7frlc"] Nov 21 19:20:14 crc kubenswrapper[4701]: W1121 19:20:14.216447 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2fecc4b4_2cc3_4e48_9db1_93b05843825b.slice/crio-1479a2d8829dbeff9757537861e2539633624b8c51e13561905098301a284f88 WatchSource:0}: Error finding container 1479a2d8829dbeff9757537861e2539633624b8c51e13561905098301a284f88: Status 404 returned error can't find the container with id 1479a2d8829dbeff9757537861e2539633624b8c51e13561905098301a284f88 Nov 21 19:20:14 crc kubenswrapper[4701]: I1121 19:20:14.384052 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-58799d9dcd-lkd7s"] Nov 21 19:20:14 crc kubenswrapper[4701]: I1121 19:20:14.492965 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-6ff5fffc67-6nrzn"] Nov 21 19:20:14 crc kubenswrapper[4701]: I1121 19:20:14.499518 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5c5f685fb-t5wpk"] Nov 21 19:20:14 crc kubenswrapper[4701]: W1121 19:20:14.541646 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda20c7ac2_0856_4c64_8910_3c053184c47b.slice/crio-345dd8912c55e03043fa755ecd8157c732c6bbafe6ce009e4ed74de4899b246a WatchSource:0}: Error finding container 345dd8912c55e03043fa755ecd8157c732c6bbafe6ce009e4ed74de4899b246a: Status 404 returned error can't find the container with id 345dd8912c55e03043fa755ecd8157c732c6bbafe6ce009e4ed74de4899b246a Nov 21 19:20:14 crc kubenswrapper[4701]: W1121 19:20:14.545961 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf658fc90_2c53_4cdd_b411_16ccb58f7625.slice/crio-7025fb1aa63d4f90a38c93f92d07cd72c8fee51fdaede4c67a01d64014f3962b WatchSource:0}: Error finding container 7025fb1aa63d4f90a38c93f92d07cd72c8fee51fdaede4c67a01d64014f3962b: Status 404 returned error can't find the container with id 7025fb1aa63d4f90a38c93f92d07cd72c8fee51fdaede4c67a01d64014f3962b Nov 21 19:20:14 crc kubenswrapper[4701]: I1121 19:20:14.718483 4701 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8678d9cc8c-7frlc" event={"ID":"2fecc4b4-2cc3-4e48-9db1-93b05843825b","Type":"ContainerStarted","Data":"1479a2d8829dbeff9757537861e2539633624b8c51e13561905098301a284f88"} Nov 21 19:20:14 crc kubenswrapper[4701]: I1121 19:20:14.722030 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-78b94b5b48-685pj" event={"ID":"c5db43f2-147c-4625-9f3a-9f68cc6afa8c","Type":"ContainerStarted","Data":"bf11c61a3663417045c11f46738c1329145b4936746dd5266e30f5596eba5492"} Nov 21 19:20:14 crc kubenswrapper[4701]: I1121 19:20:14.722441 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-78b94b5b48-685pj" Nov 21 19:20:14 crc kubenswrapper[4701]: I1121 19:20:14.722504 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-78b94b5b48-685pj" Nov 21 19:20:14 crc kubenswrapper[4701]: I1121 19:20:14.732404 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6ff5fffc67-6nrzn" event={"ID":"a20c7ac2-0856-4c64-8910-3c053184c47b","Type":"ContainerStarted","Data":"345dd8912c55e03043fa755ecd8157c732c6bbafe6ce009e4ed74de4899b246a"} Nov 21 19:20:14 crc kubenswrapper[4701]: I1121 19:20:14.753829 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-78b94b5b48-685pj" podStartSLOduration=3.753801524 podStartE2EDuration="3.753801524s" podCreationTimestamp="2025-11-21 19:20:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:20:14.746510478 +0000 UTC m=+1105.531650505" watchObservedRunningTime="2025-11-21 19:20:14.753801524 +0000 UTC m=+1105.538941551" Nov 21 19:20:14 crc kubenswrapper[4701]: I1121 19:20:14.755983 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-58799d9dcd-lkd7s" event={"ID":"c8d0e0c3-70bf-4ce0-94ea-54f03b3c42e3","Type":"ContainerStarted","Data":"145cac3322a54ffdcdbbfa51a089bc5b117c44ce1344b1726f75340d083aab7d"} Nov 21 19:20:14 crc kubenswrapper[4701]: I1121 19:20:14.759309 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5c5f685fb-t5wpk" event={"ID":"f658fc90-2c53-4cdd-b411-16ccb58f7625","Type":"ContainerStarted","Data":"7025fb1aa63d4f90a38c93f92d07cd72c8fee51fdaede4c67a01d64014f3962b"} Nov 21 19:20:14 crc kubenswrapper[4701]: I1121 19:20:14.762238 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-69chf" event={"ID":"d5b93dd5-e6da-4f02-ac4d-b89773e967d3","Type":"ContainerStarted","Data":"75887c19942bf90b052d280604c77e8e44be72e03d815f7b73e82cae6d1936d6"} Nov 21 19:20:14 crc kubenswrapper[4701]: I1121 19:20:14.783412 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-69chf" podStartSLOduration=4.517576193 podStartE2EDuration="58.783399557s" podCreationTimestamp="2025-11-21 19:19:16 +0000 UTC" firstStartedPulling="2025-11-21 19:19:17.919614132 +0000 UTC m=+1048.704754159" lastFinishedPulling="2025-11-21 19:20:12.185437496 +0000 UTC m=+1102.970577523" observedRunningTime="2025-11-21 19:20:14.779887763 +0000 UTC m=+1105.565027790" watchObservedRunningTime="2025-11-21 19:20:14.783399557 +0000 UTC m=+1105.568539584" Nov 21 19:20:14 crc kubenswrapper[4701]: I1121 19:20:14.806586 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" 
podUID="bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.160:9322/\": read tcp 10.217.0.2:37612->10.217.0.160:9322: read: connection reset by peer" Nov 21 19:20:14 crc kubenswrapper[4701]: I1121 19:20:14.806962 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a" containerName="watcher-api-log" probeResult="failure" output="Get \"http://10.217.0.160:9322/\": read tcp 10.217.0.2:37608->10.217.0.160:9322: read: connection reset by peer" Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.514644 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.640728 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a-config-data\") pod \"bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a\" (UID: \"bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a\") " Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.640912 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a-logs\") pod \"bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a\" (UID: \"bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a\") " Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.640943 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a-combined-ca-bundle\") pod \"bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a\" (UID: \"bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a\") " Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.641162 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7f9k4\" (UniqueName: \"kubernetes.io/projected/bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a-kube-api-access-7f9k4\") pod \"bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a\" (UID: \"bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a\") " Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.641271 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a-custom-prometheus-ca\") pod \"bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a\" (UID: \"bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a\") " Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.642344 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a-logs" (OuterVolumeSpecName: "logs") pod "bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a" (UID: "bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.664407 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-869574dbc6-l96tx" Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.666230 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-869574dbc6-l96tx" Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.667425 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a-kube-api-access-7f9k4" (OuterVolumeSpecName: "kube-api-access-7f9k4") pod "bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a" (UID: "bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a"). InnerVolumeSpecName "kube-api-access-7f9k4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.693524 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a" (UID: "bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.723694 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a" (UID: "bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.744036 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7f9k4\" (UniqueName: \"kubernetes.io/projected/bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a-kube-api-access-7f9k4\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.744068 4701 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.744077 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.744085 4701 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a-logs\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.758884 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a-config-data" (OuterVolumeSpecName: "config-data") pod "bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a" (UID: "bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.768374 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-6c68b8ff68-tfcgs" podUID="7d8b1846-dcd5-49b4-8eb2-74b0462538e1" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.159:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.159:8443: connect: connection refused" Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.780993 4701 generic.go:334] "Generic (PLEG): container finished" podID="2fecc4b4-2cc3-4e48-9db1-93b05843825b" containerID="baced60d4a4193ff5a9cec1723aaa0a47dce1897406515b5d1925ab55c21ee4d" exitCode=0 Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.781082 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8678d9cc8c-7frlc" event={"ID":"2fecc4b4-2cc3-4e48-9db1-93b05843825b","Type":"ContainerDied","Data":"baced60d4a4193ff5a9cec1723aaa0a47dce1897406515b5d1925ab55c21ee4d"} Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.794271 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5c5f685fb-t5wpk" event={"ID":"f658fc90-2c53-4cdd-b411-16ccb58f7625","Type":"ContainerStarted","Data":"d0c41d2af7ff5fbb1537a95f6b77d61a9ea9f0442bd7f7feface68c45bd96eeb"} Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.794340 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5c5f685fb-t5wpk" event={"ID":"f658fc90-2c53-4cdd-b411-16ccb58f7625","Type":"ContainerStarted","Data":"8654f42a09f9d1570c25cfce1f5f3c459a60f7a17f9a2a9763183d04aaad1230"} Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.794389 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5c5f685fb-t5wpk" Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.794416 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5c5f685fb-t5wpk" Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.803976 4701 generic.go:334] "Generic (PLEG): container finished" podID="bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a" containerID="4cb497b0052b985efea73a13783d29d28a1d0219bec6d49601b0ffb5c93a0efe" exitCode=0 Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.804090 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a","Type":"ContainerDied","Data":"4cb497b0052b985efea73a13783d29d28a1d0219bec6d49601b0ffb5c93a0efe"} Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.804125 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a","Type":"ContainerDied","Data":"4a7fdd395a44c4caabd353dd21ac297e016c5c5ed4291c3548bae811ee696515"} Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.804146 4701 scope.go:117] "RemoveContainer" containerID="4cb497b0052b985efea73a13783d29d28a1d0219bec6d49601b0ffb5c93a0efe" Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.804320 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-api-0" Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.818611 4701 generic.go:334] "Generic (PLEG): container finished" podID="8a7a5be4-96a4-4574-9839-2d0576595305" containerID="92827e25281b8993835100f95f59694457273f1eed9546f58a23850bd3b6a025" exitCode=1 Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.818724 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"8a7a5be4-96a4-4574-9839-2d0576595305","Type":"ContainerDied","Data":"92827e25281b8993835100f95f59694457273f1eed9546f58a23850bd3b6a025"} Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.820049 4701 scope.go:117] "RemoveContainer" containerID="92827e25281b8993835100f95f59694457273f1eed9546f58a23850bd3b6a025" Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.848478 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.857437 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-5c5f685fb-t5wpk" podStartSLOduration=2.857399881 podStartE2EDuration="2.857399881s" podCreationTimestamp="2025-11-21 19:20:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:20:15.830268913 +0000 UTC m=+1106.615408940" watchObservedRunningTime="2025-11-21 19:20:15.857399881 +0000 UTC m=+1106.642539928" Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.895440 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-api-0"] Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.931290 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-api-0"] Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.951376 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-api-0"] Nov 21 19:20:15 crc kubenswrapper[4701]: E1121 19:20:15.952073 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a" containerName="watcher-api-log" Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.952094 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a" containerName="watcher-api-log" Nov 21 19:20:15 crc kubenswrapper[4701]: E1121 19:20:15.952120 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a" containerName="watcher-api" Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.952127 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a" containerName="watcher-api" Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.952394 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a" containerName="watcher-api" Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.952411 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a" containerName="watcher-api-log" Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.968656 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-api-0" Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.981640 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-watcher-public-svc" Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.982025 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-api-config-data" Nov 21 19:20:15 crc kubenswrapper[4701]: I1121 19:20:15.982308 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-watcher-internal-svc" Nov 21 19:20:16 crc kubenswrapper[4701]: I1121 19:20:16.023336 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a" path="/var/lib/kubelet/pods/bab80bbd-56bb-4fa5-a23e-ff19bbb4a87a/volumes" Nov 21 19:20:16 crc kubenswrapper[4701]: I1121 19:20:16.024180 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Nov 21 19:20:16 crc kubenswrapper[4701]: I1121 19:20:16.099133 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b909f8f0-603a-420b-8b12-2b15b6c0900e-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"b909f8f0-603a-420b-8b12-2b15b6c0900e\") " pod="openstack/watcher-api-0" Nov 21 19:20:16 crc kubenswrapper[4701]: I1121 19:20:16.099180 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b909f8f0-603a-420b-8b12-2b15b6c0900e-internal-tls-certs\") pod \"watcher-api-0\" (UID: \"b909f8f0-603a-420b-8b12-2b15b6c0900e\") " pod="openstack/watcher-api-0" Nov 21 19:20:16 crc kubenswrapper[4701]: I1121 19:20:16.099382 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b909f8f0-603a-420b-8b12-2b15b6c0900e-config-data\") pod \"watcher-api-0\" (UID: \"b909f8f0-603a-420b-8b12-2b15b6c0900e\") " pod="openstack/watcher-api-0" Nov 21 19:20:16 crc kubenswrapper[4701]: I1121 19:20:16.099406 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/b909f8f0-603a-420b-8b12-2b15b6c0900e-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"b909f8f0-603a-420b-8b12-2b15b6c0900e\") " pod="openstack/watcher-api-0" Nov 21 19:20:16 crc kubenswrapper[4701]: I1121 19:20:16.099445 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b909f8f0-603a-420b-8b12-2b15b6c0900e-public-tls-certs\") pod \"watcher-api-0\" (UID: \"b909f8f0-603a-420b-8b12-2b15b6c0900e\") " pod="openstack/watcher-api-0" Nov 21 19:20:16 crc kubenswrapper[4701]: I1121 19:20:16.099507 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hq2rt\" (UniqueName: \"kubernetes.io/projected/b909f8f0-603a-420b-8b12-2b15b6c0900e-kube-api-access-hq2rt\") pod \"watcher-api-0\" (UID: \"b909f8f0-603a-420b-8b12-2b15b6c0900e\") " pod="openstack/watcher-api-0" Nov 21 19:20:16 crc kubenswrapper[4701]: I1121 19:20:16.099545 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b909f8f0-603a-420b-8b12-2b15b6c0900e-logs\") pod \"watcher-api-0\" (UID: 
\"b909f8f0-603a-420b-8b12-2b15b6c0900e\") " pod="openstack/watcher-api-0" Nov 21 19:20:16 crc kubenswrapper[4701]: I1121 19:20:16.201893 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hq2rt\" (UniqueName: \"kubernetes.io/projected/b909f8f0-603a-420b-8b12-2b15b6c0900e-kube-api-access-hq2rt\") pod \"watcher-api-0\" (UID: \"b909f8f0-603a-420b-8b12-2b15b6c0900e\") " pod="openstack/watcher-api-0" Nov 21 19:20:16 crc kubenswrapper[4701]: I1121 19:20:16.201964 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b909f8f0-603a-420b-8b12-2b15b6c0900e-logs\") pod \"watcher-api-0\" (UID: \"b909f8f0-603a-420b-8b12-2b15b6c0900e\") " pod="openstack/watcher-api-0" Nov 21 19:20:16 crc kubenswrapper[4701]: I1121 19:20:16.202018 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b909f8f0-603a-420b-8b12-2b15b6c0900e-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"b909f8f0-603a-420b-8b12-2b15b6c0900e\") " pod="openstack/watcher-api-0" Nov 21 19:20:16 crc kubenswrapper[4701]: I1121 19:20:16.202043 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b909f8f0-603a-420b-8b12-2b15b6c0900e-internal-tls-certs\") pod \"watcher-api-0\" (UID: \"b909f8f0-603a-420b-8b12-2b15b6c0900e\") " pod="openstack/watcher-api-0" Nov 21 19:20:16 crc kubenswrapper[4701]: I1121 19:20:16.202118 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b909f8f0-603a-420b-8b12-2b15b6c0900e-config-data\") pod \"watcher-api-0\" (UID: \"b909f8f0-603a-420b-8b12-2b15b6c0900e\") " pod="openstack/watcher-api-0" Nov 21 19:20:16 crc kubenswrapper[4701]: I1121 19:20:16.202144 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/b909f8f0-603a-420b-8b12-2b15b6c0900e-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"b909f8f0-603a-420b-8b12-2b15b6c0900e\") " pod="openstack/watcher-api-0" Nov 21 19:20:16 crc kubenswrapper[4701]: I1121 19:20:16.202178 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b909f8f0-603a-420b-8b12-2b15b6c0900e-public-tls-certs\") pod \"watcher-api-0\" (UID: \"b909f8f0-603a-420b-8b12-2b15b6c0900e\") " pod="openstack/watcher-api-0" Nov 21 19:20:16 crc kubenswrapper[4701]: I1121 19:20:16.202738 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b909f8f0-603a-420b-8b12-2b15b6c0900e-logs\") pod \"watcher-api-0\" (UID: \"b909f8f0-603a-420b-8b12-2b15b6c0900e\") " pod="openstack/watcher-api-0" Nov 21 19:20:16 crc kubenswrapper[4701]: I1121 19:20:16.210112 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b909f8f0-603a-420b-8b12-2b15b6c0900e-config-data\") pod \"watcher-api-0\" (UID: \"b909f8f0-603a-420b-8b12-2b15b6c0900e\") " pod="openstack/watcher-api-0" Nov 21 19:20:16 crc kubenswrapper[4701]: I1121 19:20:16.210646 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b909f8f0-603a-420b-8b12-2b15b6c0900e-public-tls-certs\") pod \"watcher-api-0\" (UID: 
\"b909f8f0-603a-420b-8b12-2b15b6c0900e\") " pod="openstack/watcher-api-0" Nov 21 19:20:16 crc kubenswrapper[4701]: I1121 19:20:16.211604 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b909f8f0-603a-420b-8b12-2b15b6c0900e-internal-tls-certs\") pod \"watcher-api-0\" (UID: \"b909f8f0-603a-420b-8b12-2b15b6c0900e\") " pod="openstack/watcher-api-0" Nov 21 19:20:16 crc kubenswrapper[4701]: I1121 19:20:16.212128 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b909f8f0-603a-420b-8b12-2b15b6c0900e-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"b909f8f0-603a-420b-8b12-2b15b6c0900e\") " pod="openstack/watcher-api-0" Nov 21 19:20:16 crc kubenswrapper[4701]: I1121 19:20:16.219067 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/b909f8f0-603a-420b-8b12-2b15b6c0900e-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"b909f8f0-603a-420b-8b12-2b15b6c0900e\") " pod="openstack/watcher-api-0" Nov 21 19:20:16 crc kubenswrapper[4701]: I1121 19:20:16.237125 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hq2rt\" (UniqueName: \"kubernetes.io/projected/b909f8f0-603a-420b-8b12-2b15b6c0900e-kube-api-access-hq2rt\") pod \"watcher-api-0\" (UID: \"b909f8f0-603a-420b-8b12-2b15b6c0900e\") " pod="openstack/watcher-api-0" Nov 21 19:20:16 crc kubenswrapper[4701]: I1121 19:20:16.318783 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Nov 21 19:20:16 crc kubenswrapper[4701]: I1121 19:20:16.481950 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Nov 21 19:20:16 crc kubenswrapper[4701]: I1121 19:20:16.482039 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Nov 21 19:20:16 crc kubenswrapper[4701]: I1121 19:20:16.509260 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Nov 21 19:20:16 crc kubenswrapper[4701]: I1121 19:20:16.577615 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-applier-0" Nov 21 19:20:16 crc kubenswrapper[4701]: I1121 19:20:16.874452 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-applier-0" Nov 21 19:20:17 crc kubenswrapper[4701]: I1121 19:20:17.036146 4701 scope.go:117] "RemoveContainer" containerID="7904f05470d37b18128384028bcbebca83b236367b317de53ac8c691079ab4e9" Nov 21 19:20:17 crc kubenswrapper[4701]: I1121 19:20:17.377400 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-796cb85bf8-h88pn"] Nov 21 19:20:17 crc kubenswrapper[4701]: I1121 19:20:17.387032 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-796cb85bf8-h88pn" Nov 21 19:20:17 crc kubenswrapper[4701]: I1121 19:20:17.394955 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 21 19:20:17 crc kubenswrapper[4701]: I1121 19:20:17.396346 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 21 19:20:17 crc kubenswrapper[4701]: I1121 19:20:17.444128 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-796cb85bf8-h88pn"] Nov 21 19:20:17 crc kubenswrapper[4701]: I1121 19:20:17.544137 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59b306d9-cacf-4e38-b19f-60f8ebe026a7-config-data\") pod \"barbican-api-796cb85bf8-h88pn\" (UID: \"59b306d9-cacf-4e38-b19f-60f8ebe026a7\") " pod="openstack/barbican-api-796cb85bf8-h88pn" Nov 21 19:20:17 crc kubenswrapper[4701]: I1121 19:20:17.544289 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/59b306d9-cacf-4e38-b19f-60f8ebe026a7-logs\") pod \"barbican-api-796cb85bf8-h88pn\" (UID: \"59b306d9-cacf-4e38-b19f-60f8ebe026a7\") " pod="openstack/barbican-api-796cb85bf8-h88pn" Nov 21 19:20:17 crc kubenswrapper[4701]: I1121 19:20:17.544349 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/59b306d9-cacf-4e38-b19f-60f8ebe026a7-public-tls-certs\") pod \"barbican-api-796cb85bf8-h88pn\" (UID: \"59b306d9-cacf-4e38-b19f-60f8ebe026a7\") " pod="openstack/barbican-api-796cb85bf8-h88pn" Nov 21 19:20:17 crc kubenswrapper[4701]: I1121 19:20:17.544447 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fvwxn\" (UniqueName: \"kubernetes.io/projected/59b306d9-cacf-4e38-b19f-60f8ebe026a7-kube-api-access-fvwxn\") pod \"barbican-api-796cb85bf8-h88pn\" (UID: \"59b306d9-cacf-4e38-b19f-60f8ebe026a7\") " pod="openstack/barbican-api-796cb85bf8-h88pn" Nov 21 19:20:17 crc kubenswrapper[4701]: I1121 19:20:17.544492 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/59b306d9-cacf-4e38-b19f-60f8ebe026a7-config-data-custom\") pod \"barbican-api-796cb85bf8-h88pn\" (UID: \"59b306d9-cacf-4e38-b19f-60f8ebe026a7\") " pod="openstack/barbican-api-796cb85bf8-h88pn" Nov 21 19:20:17 crc kubenswrapper[4701]: I1121 19:20:17.544529 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/59b306d9-cacf-4e38-b19f-60f8ebe026a7-internal-tls-certs\") pod \"barbican-api-796cb85bf8-h88pn\" (UID: \"59b306d9-cacf-4e38-b19f-60f8ebe026a7\") " pod="openstack/barbican-api-796cb85bf8-h88pn" Nov 21 19:20:17 crc kubenswrapper[4701]: I1121 19:20:17.544593 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59b306d9-cacf-4e38-b19f-60f8ebe026a7-combined-ca-bundle\") pod \"barbican-api-796cb85bf8-h88pn\" (UID: \"59b306d9-cacf-4e38-b19f-60f8ebe026a7\") " pod="openstack/barbican-api-796cb85bf8-h88pn" Nov 21 19:20:17 crc kubenswrapper[4701]: I1121 19:20:17.646732 4701 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59b306d9-cacf-4e38-b19f-60f8ebe026a7-combined-ca-bundle\") pod \"barbican-api-796cb85bf8-h88pn\" (UID: \"59b306d9-cacf-4e38-b19f-60f8ebe026a7\") " pod="openstack/barbican-api-796cb85bf8-h88pn" Nov 21 19:20:17 crc kubenswrapper[4701]: I1121 19:20:17.646906 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59b306d9-cacf-4e38-b19f-60f8ebe026a7-config-data\") pod \"barbican-api-796cb85bf8-h88pn\" (UID: \"59b306d9-cacf-4e38-b19f-60f8ebe026a7\") " pod="openstack/barbican-api-796cb85bf8-h88pn" Nov 21 19:20:17 crc kubenswrapper[4701]: I1121 19:20:17.647949 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/59b306d9-cacf-4e38-b19f-60f8ebe026a7-logs\") pod \"barbican-api-796cb85bf8-h88pn\" (UID: \"59b306d9-cacf-4e38-b19f-60f8ebe026a7\") " pod="openstack/barbican-api-796cb85bf8-h88pn" Nov 21 19:20:17 crc kubenswrapper[4701]: I1121 19:20:17.646941 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/59b306d9-cacf-4e38-b19f-60f8ebe026a7-logs\") pod \"barbican-api-796cb85bf8-h88pn\" (UID: \"59b306d9-cacf-4e38-b19f-60f8ebe026a7\") " pod="openstack/barbican-api-796cb85bf8-h88pn" Nov 21 19:20:17 crc kubenswrapper[4701]: I1121 19:20:17.649317 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/59b306d9-cacf-4e38-b19f-60f8ebe026a7-public-tls-certs\") pod \"barbican-api-796cb85bf8-h88pn\" (UID: \"59b306d9-cacf-4e38-b19f-60f8ebe026a7\") " pod="openstack/barbican-api-796cb85bf8-h88pn" Nov 21 19:20:17 crc kubenswrapper[4701]: I1121 19:20:17.649641 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fvwxn\" (UniqueName: \"kubernetes.io/projected/59b306d9-cacf-4e38-b19f-60f8ebe026a7-kube-api-access-fvwxn\") pod \"barbican-api-796cb85bf8-h88pn\" (UID: \"59b306d9-cacf-4e38-b19f-60f8ebe026a7\") " pod="openstack/barbican-api-796cb85bf8-h88pn" Nov 21 19:20:17 crc kubenswrapper[4701]: I1121 19:20:17.649854 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/59b306d9-cacf-4e38-b19f-60f8ebe026a7-config-data-custom\") pod \"barbican-api-796cb85bf8-h88pn\" (UID: \"59b306d9-cacf-4e38-b19f-60f8ebe026a7\") " pod="openstack/barbican-api-796cb85bf8-h88pn" Nov 21 19:20:17 crc kubenswrapper[4701]: I1121 19:20:17.650172 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/59b306d9-cacf-4e38-b19f-60f8ebe026a7-internal-tls-certs\") pod \"barbican-api-796cb85bf8-h88pn\" (UID: \"59b306d9-cacf-4e38-b19f-60f8ebe026a7\") " pod="openstack/barbican-api-796cb85bf8-h88pn" Nov 21 19:20:17 crc kubenswrapper[4701]: I1121 19:20:17.654545 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59b306d9-cacf-4e38-b19f-60f8ebe026a7-combined-ca-bundle\") pod \"barbican-api-796cb85bf8-h88pn\" (UID: \"59b306d9-cacf-4e38-b19f-60f8ebe026a7\") " pod="openstack/barbican-api-796cb85bf8-h88pn" Nov 21 19:20:17 crc kubenswrapper[4701]: I1121 19:20:17.656645 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/59b306d9-cacf-4e38-b19f-60f8ebe026a7-public-tls-certs\") pod \"barbican-api-796cb85bf8-h88pn\" (UID: \"59b306d9-cacf-4e38-b19f-60f8ebe026a7\") " pod="openstack/barbican-api-796cb85bf8-h88pn" Nov 21 19:20:17 crc kubenswrapper[4701]: I1121 19:20:17.658710 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/59b306d9-cacf-4e38-b19f-60f8ebe026a7-config-data-custom\") pod \"barbican-api-796cb85bf8-h88pn\" (UID: \"59b306d9-cacf-4e38-b19f-60f8ebe026a7\") " pod="openstack/barbican-api-796cb85bf8-h88pn" Nov 21 19:20:17 crc kubenswrapper[4701]: I1121 19:20:17.661251 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/59b306d9-cacf-4e38-b19f-60f8ebe026a7-internal-tls-certs\") pod \"barbican-api-796cb85bf8-h88pn\" (UID: \"59b306d9-cacf-4e38-b19f-60f8ebe026a7\") " pod="openstack/barbican-api-796cb85bf8-h88pn" Nov 21 19:20:17 crc kubenswrapper[4701]: I1121 19:20:17.665157 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59b306d9-cacf-4e38-b19f-60f8ebe026a7-config-data\") pod \"barbican-api-796cb85bf8-h88pn\" (UID: \"59b306d9-cacf-4e38-b19f-60f8ebe026a7\") " pod="openstack/barbican-api-796cb85bf8-h88pn" Nov 21 19:20:17 crc kubenswrapper[4701]: I1121 19:20:17.679418 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fvwxn\" (UniqueName: \"kubernetes.io/projected/59b306d9-cacf-4e38-b19f-60f8ebe026a7-kube-api-access-fvwxn\") pod \"barbican-api-796cb85bf8-h88pn\" (UID: \"59b306d9-cacf-4e38-b19f-60f8ebe026a7\") " pod="openstack/barbican-api-796cb85bf8-h88pn" Nov 21 19:20:17 crc kubenswrapper[4701]: I1121 19:20:17.727458 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-796cb85bf8-h88pn" Nov 21 19:20:18 crc kubenswrapper[4701]: I1121 19:20:18.614034 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 19:20:18 crc kubenswrapper[4701]: I1121 19:20:18.614110 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 19:20:19 crc kubenswrapper[4701]: I1121 19:20:19.177944 4701 scope.go:117] "RemoveContainer" containerID="4cb497b0052b985efea73a13783d29d28a1d0219bec6d49601b0ffb5c93a0efe" Nov 21 19:20:19 crc kubenswrapper[4701]: E1121 19:20:19.178993 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4cb497b0052b985efea73a13783d29d28a1d0219bec6d49601b0ffb5c93a0efe\": container with ID starting with 4cb497b0052b985efea73a13783d29d28a1d0219bec6d49601b0ffb5c93a0efe not found: ID does not exist" containerID="4cb497b0052b985efea73a13783d29d28a1d0219bec6d49601b0ffb5c93a0efe" Nov 21 19:20:19 crc kubenswrapper[4701]: I1121 19:20:19.179043 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4cb497b0052b985efea73a13783d29d28a1d0219bec6d49601b0ffb5c93a0efe"} err="failed to get container status \"4cb497b0052b985efea73a13783d29d28a1d0219bec6d49601b0ffb5c93a0efe\": rpc error: code = NotFound desc = could not find container \"4cb497b0052b985efea73a13783d29d28a1d0219bec6d49601b0ffb5c93a0efe\": container with ID starting with 4cb497b0052b985efea73a13783d29d28a1d0219bec6d49601b0ffb5c93a0efe not found: ID does not exist" Nov 21 19:20:19 crc kubenswrapper[4701]: I1121 19:20:19.179079 4701 scope.go:117] "RemoveContainer" containerID="7904f05470d37b18128384028bcbebca83b236367b317de53ac8c691079ab4e9" Nov 21 19:20:19 crc kubenswrapper[4701]: E1121 19:20:19.179526 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7904f05470d37b18128384028bcbebca83b236367b317de53ac8c691079ab4e9\": container with ID starting with 7904f05470d37b18128384028bcbebca83b236367b317de53ac8c691079ab4e9 not found: ID does not exist" containerID="7904f05470d37b18128384028bcbebca83b236367b317de53ac8c691079ab4e9" Nov 21 19:20:19 crc kubenswrapper[4701]: I1121 19:20:19.179559 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7904f05470d37b18128384028bcbebca83b236367b317de53ac8c691079ab4e9"} err="failed to get container status \"7904f05470d37b18128384028bcbebca83b236367b317de53ac8c691079ab4e9\": rpc error: code = NotFound desc = could not find container \"7904f05470d37b18128384028bcbebca83b236367b317de53ac8c691079ab4e9\": container with ID starting with 7904f05470d37b18128384028bcbebca83b236367b317de53ac8c691079ab4e9 not found: ID does not exist" Nov 21 19:20:21 crc kubenswrapper[4701]: I1121 19:20:21.339665 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Nov 21 19:20:21 crc kubenswrapper[4701]: I1121 19:20:21.461580 4701 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openstack/barbican-api-796cb85bf8-h88pn"] Nov 21 19:20:21 crc kubenswrapper[4701]: I1121 19:20:21.936977 4701 generic.go:334] "Generic (PLEG): container finished" podID="d5b93dd5-e6da-4f02-ac4d-b89773e967d3" containerID="75887c19942bf90b052d280604c77e8e44be72e03d815f7b73e82cae6d1936d6" exitCode=0 Nov 21 19:20:21 crc kubenswrapper[4701]: I1121 19:20:21.937155 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-69chf" event={"ID":"d5b93dd5-e6da-4f02-ac4d-b89773e967d3","Type":"ContainerDied","Data":"75887c19942bf90b052d280604c77e8e44be72e03d815f7b73e82cae6d1936d6"} Nov 21 19:20:21 crc kubenswrapper[4701]: I1121 19:20:21.940641 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffb1df83-0092-42e4-885f-e934786a503b","Type":"ContainerStarted","Data":"bd9c874fdbe44d1bdec553997416a9c3856a7f215901e4170d1f23cddff5b52e"} Nov 21 19:20:21 crc kubenswrapper[4701]: I1121 19:20:21.945262 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8678d9cc8c-7frlc" event={"ID":"2fecc4b4-2cc3-4e48-9db1-93b05843825b","Type":"ContainerStarted","Data":"5a07422e44cf54dc14452b4da0d7f7ccce323483b5e4117e874acfd3095d88d6"} Nov 21 19:20:21 crc kubenswrapper[4701]: I1121 19:20:21.946158 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8678d9cc8c-7frlc" Nov 21 19:20:21 crc kubenswrapper[4701]: I1121 19:20:21.980406 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8678d9cc8c-7frlc" podStartSLOduration=8.980381697 podStartE2EDuration="8.980381697s" podCreationTimestamp="2025-11-21 19:20:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:20:21.977063888 +0000 UTC m=+1112.762203915" watchObservedRunningTime="2025-11-21 19:20:21.980381697 +0000 UTC m=+1112.765521724" Nov 21 19:20:21 crc kubenswrapper[4701]: I1121 19:20:21.986157 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6ff5fffc67-6nrzn" event={"ID":"a20c7ac2-0856-4c64-8910-3c053184c47b","Type":"ContainerStarted","Data":"b89b05f1230274607c3c2bcf9ea8c15d2344be1e1694ce134bdb58faafcc8cb8"} Nov 21 19:20:21 crc kubenswrapper[4701]: I1121 19:20:21.986232 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6ff5fffc67-6nrzn" event={"ID":"a20c7ac2-0856-4c64-8910-3c053184c47b","Type":"ContainerStarted","Data":"40f5864edb1dc213555320f32065a7e02c05c13c542cfebd276f2c94a457c223"} Nov 21 19:20:21 crc kubenswrapper[4701]: I1121 19:20:21.986250 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-58799d9dcd-lkd7s" event={"ID":"c8d0e0c3-70bf-4ce0-94ea-54f03b3c42e3","Type":"ContainerStarted","Data":"0a9291642a47711bbd78fed6db71dfe2d4929af49361880ef74cd10f95457646"} Nov 21 19:20:21 crc kubenswrapper[4701]: I1121 19:20:21.986264 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-58799d9dcd-lkd7s" event={"ID":"c8d0e0c3-70bf-4ce0-94ea-54f03b3c42e3","Type":"ContainerStarted","Data":"45a8b738bdf5e29d341bdc57f6ad3d8eed8867d2d9647ccb4c916754365df9ce"} Nov 21 19:20:21 crc kubenswrapper[4701]: I1121 19:20:21.989293 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" 
event={"ID":"b909f8f0-603a-420b-8b12-2b15b6c0900e","Type":"ContainerStarted","Data":"1f8e356bf3d5bb74fe25c64fe56acd8c599068b8897490f22606bf490438ddf5"} Nov 21 19:20:21 crc kubenswrapper[4701]: I1121 19:20:21.989373 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Nov 21 19:20:21 crc kubenswrapper[4701]: I1121 19:20:21.989388 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"b909f8f0-603a-420b-8b12-2b15b6c0900e","Type":"ContainerStarted","Data":"e0eb684d18ffaf4e84b7d1e5db5a1aa23e8f358e1ca9498a56c6fe69984b0eb9"} Nov 21 19:20:21 crc kubenswrapper[4701]: I1121 19:20:21.989398 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"b909f8f0-603a-420b-8b12-2b15b6c0900e","Type":"ContainerStarted","Data":"24c4cc4e3af56c6af197bc34fb3ac9b8d296a168c262a2b71d2054ca79b64551"} Nov 21 19:20:21 crc kubenswrapper[4701]: I1121 19:20:21.994009 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"8a7a5be4-96a4-4574-9839-2d0576595305","Type":"ContainerStarted","Data":"ee2807df27d43a686f08c63a0b57e104222964e1ac803446173636918d039a8b"} Nov 21 19:20:21 crc kubenswrapper[4701]: I1121 19:20:21.994431 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="b909f8f0-603a-420b-8b12-2b15b6c0900e" containerName="watcher-api" probeResult="failure" output="Get \"https://10.217.0.173:9322/\": dial tcp 10.217.0.173:9322: connect: connection refused" Nov 21 19:20:21 crc kubenswrapper[4701]: I1121 19:20:21.999560 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-58799d9dcd-lkd7s" podStartSLOduration=3.609476974 podStartE2EDuration="9.999543791s" podCreationTimestamp="2025-11-21 19:20:12 +0000 UTC" firstStartedPulling="2025-11-21 19:20:14.42059076 +0000 UTC m=+1105.205730787" lastFinishedPulling="2025-11-21 19:20:20.810657577 +0000 UTC m=+1111.595797604" observedRunningTime="2025-11-21 19:20:21.995246066 +0000 UTC m=+1112.780386093" watchObservedRunningTime="2025-11-21 19:20:21.999543791 +0000 UTC m=+1112.784683818" Nov 21 19:20:22 crc kubenswrapper[4701]: I1121 19:20:22.008767 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-796cb85bf8-h88pn" event={"ID":"59b306d9-cacf-4e38-b19f-60f8ebe026a7","Type":"ContainerStarted","Data":"1027d5695e6f3e60e5e46365bec06bff34c3889622113daa10215e4abc2b8edd"} Nov 21 19:20:22 crc kubenswrapper[4701]: I1121 19:20:22.008802 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-796cb85bf8-h88pn" event={"ID":"59b306d9-cacf-4e38-b19f-60f8ebe026a7","Type":"ContainerStarted","Data":"12ab9ef74abb9c2d88f23b8bbb065c4316263b601d0822e4a2bb937a6fc58019"} Nov 21 19:20:22 crc kubenswrapper[4701]: I1121 19:20:22.025108 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-6ff5fffc67-6nrzn" podStartSLOduration=3.731264258 podStartE2EDuration="10.025090995s" podCreationTimestamp="2025-11-21 19:20:12 +0000 UTC" firstStartedPulling="2025-11-21 19:20:14.545060937 +0000 UTC m=+1105.330200964" lastFinishedPulling="2025-11-21 19:20:20.838887674 +0000 UTC m=+1111.624027701" observedRunningTime="2025-11-21 19:20:22.017759539 +0000 UTC m=+1112.802899566" watchObservedRunningTime="2025-11-21 19:20:22.025090995 +0000 UTC m=+1112.810231022" Nov 21 19:20:22 crc kubenswrapper[4701]: I1121 19:20:22.086164 4701 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-api-0" podStartSLOduration=7.086145473 podStartE2EDuration="7.086145473s" podCreationTimestamp="2025-11-21 19:20:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:20:22.078065056 +0000 UTC m=+1112.863205083" watchObservedRunningTime="2025-11-21 19:20:22.086145473 +0000 UTC m=+1112.871285500" Nov 21 19:20:23 crc kubenswrapper[4701]: I1121 19:20:23.036470 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-796cb85bf8-h88pn" event={"ID":"59b306d9-cacf-4e38-b19f-60f8ebe026a7","Type":"ContainerStarted","Data":"341347a2ea8d3f917497beb2c9885113d06c95a2b8ef3b240e95c336364cc7b1"} Nov 21 19:20:23 crc kubenswrapper[4701]: I1121 19:20:23.069386 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-796cb85bf8-h88pn" podStartSLOduration=6.069363012 podStartE2EDuration="6.069363012s" podCreationTimestamp="2025-11-21 19:20:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:20:23.061779899 +0000 UTC m=+1113.846919926" watchObservedRunningTime="2025-11-21 19:20:23.069363012 +0000 UTC m=+1113.854503039" Nov 21 19:20:23 crc kubenswrapper[4701]: I1121 19:20:23.492115 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-69chf" Nov 21 19:20:23 crc kubenswrapper[4701]: I1121 19:20:23.605161 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d5b93dd5-e6da-4f02-ac4d-b89773e967d3-etc-machine-id\") pod \"d5b93dd5-e6da-4f02-ac4d-b89773e967d3\" (UID: \"d5b93dd5-e6da-4f02-ac4d-b89773e967d3\") " Nov 21 19:20:23 crc kubenswrapper[4701]: I1121 19:20:23.605363 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d5b93dd5-e6da-4f02-ac4d-b89773e967d3-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "d5b93dd5-e6da-4f02-ac4d-b89773e967d3" (UID: "d5b93dd5-e6da-4f02-ac4d-b89773e967d3"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 19:20:23 crc kubenswrapper[4701]: I1121 19:20:23.605807 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5b93dd5-e6da-4f02-ac4d-b89773e967d3-config-data\") pod \"d5b93dd5-e6da-4f02-ac4d-b89773e967d3\" (UID: \"d5b93dd5-e6da-4f02-ac4d-b89773e967d3\") " Nov 21 19:20:23 crc kubenswrapper[4701]: I1121 19:20:23.606013 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5b93dd5-e6da-4f02-ac4d-b89773e967d3-combined-ca-bundle\") pod \"d5b93dd5-e6da-4f02-ac4d-b89773e967d3\" (UID: \"d5b93dd5-e6da-4f02-ac4d-b89773e967d3\") " Nov 21 19:20:23 crc kubenswrapper[4701]: I1121 19:20:23.606077 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d5b93dd5-e6da-4f02-ac4d-b89773e967d3-db-sync-config-data\") pod \"d5b93dd5-e6da-4f02-ac4d-b89773e967d3\" (UID: \"d5b93dd5-e6da-4f02-ac4d-b89773e967d3\") " Nov 21 19:20:23 crc kubenswrapper[4701]: I1121 19:20:23.606106 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vjwr4\" (UniqueName: \"kubernetes.io/projected/d5b93dd5-e6da-4f02-ac4d-b89773e967d3-kube-api-access-vjwr4\") pod \"d5b93dd5-e6da-4f02-ac4d-b89773e967d3\" (UID: \"d5b93dd5-e6da-4f02-ac4d-b89773e967d3\") " Nov 21 19:20:23 crc kubenswrapper[4701]: I1121 19:20:23.606130 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d5b93dd5-e6da-4f02-ac4d-b89773e967d3-scripts\") pod \"d5b93dd5-e6da-4f02-ac4d-b89773e967d3\" (UID: \"d5b93dd5-e6da-4f02-ac4d-b89773e967d3\") " Nov 21 19:20:23 crc kubenswrapper[4701]: I1121 19:20:23.607195 4701 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d5b93dd5-e6da-4f02-ac4d-b89773e967d3-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:23 crc kubenswrapper[4701]: I1121 19:20:23.613367 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d5b93dd5-e6da-4f02-ac4d-b89773e967d3-kube-api-access-vjwr4" (OuterVolumeSpecName: "kube-api-access-vjwr4") pod "d5b93dd5-e6da-4f02-ac4d-b89773e967d3" (UID: "d5b93dd5-e6da-4f02-ac4d-b89773e967d3"). InnerVolumeSpecName "kube-api-access-vjwr4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:20:23 crc kubenswrapper[4701]: I1121 19:20:23.613537 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5b93dd5-e6da-4f02-ac4d-b89773e967d3-scripts" (OuterVolumeSpecName: "scripts") pod "d5b93dd5-e6da-4f02-ac4d-b89773e967d3" (UID: "d5b93dd5-e6da-4f02-ac4d-b89773e967d3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:23 crc kubenswrapper[4701]: I1121 19:20:23.643887 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5b93dd5-e6da-4f02-ac4d-b89773e967d3-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "d5b93dd5-e6da-4f02-ac4d-b89773e967d3" (UID: "d5b93dd5-e6da-4f02-ac4d-b89773e967d3"). InnerVolumeSpecName "db-sync-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:23 crc kubenswrapper[4701]: I1121 19:20:23.651095 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5b93dd5-e6da-4f02-ac4d-b89773e967d3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d5b93dd5-e6da-4f02-ac4d-b89773e967d3" (UID: "d5b93dd5-e6da-4f02-ac4d-b89773e967d3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:23 crc kubenswrapper[4701]: I1121 19:20:23.709285 4701 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d5b93dd5-e6da-4f02-ac4d-b89773e967d3-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:23 crc kubenswrapper[4701]: I1121 19:20:23.709322 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vjwr4\" (UniqueName: \"kubernetes.io/projected/d5b93dd5-e6da-4f02-ac4d-b89773e967d3-kube-api-access-vjwr4\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:23 crc kubenswrapper[4701]: I1121 19:20:23.709336 4701 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d5b93dd5-e6da-4f02-ac4d-b89773e967d3-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:23 crc kubenswrapper[4701]: I1121 19:20:23.709346 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5b93dd5-e6da-4f02-ac4d-b89773e967d3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:23 crc kubenswrapper[4701]: I1121 19:20:23.724416 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5b93dd5-e6da-4f02-ac4d-b89773e967d3-config-data" (OuterVolumeSpecName: "config-data") pod "d5b93dd5-e6da-4f02-ac4d-b89773e967d3" (UID: "d5b93dd5-e6da-4f02-ac4d-b89773e967d3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:23 crc kubenswrapper[4701]: I1121 19:20:23.812593 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5b93dd5-e6da-4f02-ac4d-b89773e967d3-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.047510 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-69chf" event={"ID":"d5b93dd5-e6da-4f02-ac4d-b89773e967d3","Type":"ContainerDied","Data":"f7ea46bbf91e1a6afb865a5af39dca797bc454b97be2a101c07eb378693c5f00"} Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.047552 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f7ea46bbf91e1a6afb865a5af39dca797bc454b97be2a101c07eb378693c5f00" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.047615 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-69chf" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.050850 4701 generic.go:334] "Generic (PLEG): container finished" podID="87272c2c-3166-4a6a-aff9-41278b0b1b51" containerID="7b177444e98fadf0cc0025209b7d78991c3bced6b8dad01552c748c345076aa4" exitCode=0 Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.050993 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-sklns" event={"ID":"87272c2c-3166-4a6a-aff9-41278b0b1b51","Type":"ContainerDied","Data":"7b177444e98fadf0cc0025209b7d78991c3bced6b8dad01552c748c345076aa4"} Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.051751 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-796cb85bf8-h88pn" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.051983 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-796cb85bf8-h88pn" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.310813 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 21 19:20:24 crc kubenswrapper[4701]: E1121 19:20:24.313230 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5b93dd5-e6da-4f02-ac4d-b89773e967d3" containerName="cinder-db-sync" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.313251 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5b93dd5-e6da-4f02-ac4d-b89773e967d3" containerName="cinder-db-sync" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.313454 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5b93dd5-e6da-4f02-ac4d-b89773e967d3" containerName="cinder-db-sync" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.314664 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.319612 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.319639 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.319863 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.323352 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-wzsf6" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.349936 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.396208 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8678d9cc8c-7frlc"] Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.428449 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nqc5m\" (UniqueName: \"kubernetes.io/projected/1d332f3a-bf5e-4154-93c9-36265e2b9a3d-kube-api-access-nqc5m\") pod \"cinder-scheduler-0\" (UID: \"1d332f3a-bf5e-4154-93c9-36265e2b9a3d\") " pod="openstack/cinder-scheduler-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.428515 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d332f3a-bf5e-4154-93c9-36265e2b9a3d-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"1d332f3a-bf5e-4154-93c9-36265e2b9a3d\") " pod="openstack/cinder-scheduler-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.428536 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1d332f3a-bf5e-4154-93c9-36265e2b9a3d-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"1d332f3a-bf5e-4154-93c9-36265e2b9a3d\") " pod="openstack/cinder-scheduler-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.428643 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1d332f3a-bf5e-4154-93c9-36265e2b9a3d-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"1d332f3a-bf5e-4154-93c9-36265e2b9a3d\") " pod="openstack/cinder-scheduler-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.428672 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1d332f3a-bf5e-4154-93c9-36265e2b9a3d-scripts\") pod \"cinder-scheduler-0\" (UID: \"1d332f3a-bf5e-4154-93c9-36265e2b9a3d\") " pod="openstack/cinder-scheduler-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.428718 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d332f3a-bf5e-4154-93c9-36265e2b9a3d-config-data\") pod \"cinder-scheduler-0\" (UID: \"1d332f3a-bf5e-4154-93c9-36265e2b9a3d\") " pod="openstack/cinder-scheduler-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.432847 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5cb76f667f-tsrk6"] Nov 21 19:20:24 crc 
kubenswrapper[4701]: I1121 19:20:24.434626 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5cb76f667f-tsrk6" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.470818 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5cb76f667f-tsrk6"] Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.532741 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9d84924e-04b9-4541-bf13-c47781cf883f-ovsdbserver-sb\") pod \"dnsmasq-dns-5cb76f667f-tsrk6\" (UID: \"9d84924e-04b9-4541-bf13-c47781cf883f\") " pod="openstack/dnsmasq-dns-5cb76f667f-tsrk6" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.532786 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d84924e-04b9-4541-bf13-c47781cf883f-config\") pod \"dnsmasq-dns-5cb76f667f-tsrk6\" (UID: \"9d84924e-04b9-4541-bf13-c47781cf883f\") " pod="openstack/dnsmasq-dns-5cb76f667f-tsrk6" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.532819 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d332f3a-bf5e-4154-93c9-36265e2b9a3d-config-data\") pod \"cinder-scheduler-0\" (UID: \"1d332f3a-bf5e-4154-93c9-36265e2b9a3d\") " pod="openstack/cinder-scheduler-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.532867 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nqc5m\" (UniqueName: \"kubernetes.io/projected/1d332f3a-bf5e-4154-93c9-36265e2b9a3d-kube-api-access-nqc5m\") pod \"cinder-scheduler-0\" (UID: \"1d332f3a-bf5e-4154-93c9-36265e2b9a3d\") " pod="openstack/cinder-scheduler-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.532886 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9d84924e-04b9-4541-bf13-c47781cf883f-ovsdbserver-nb\") pod \"dnsmasq-dns-5cb76f667f-tsrk6\" (UID: \"9d84924e-04b9-4541-bf13-c47781cf883f\") " pod="openstack/dnsmasq-dns-5cb76f667f-tsrk6" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.532914 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d332f3a-bf5e-4154-93c9-36265e2b9a3d-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"1d332f3a-bf5e-4154-93c9-36265e2b9a3d\") " pod="openstack/cinder-scheduler-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.532932 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1d332f3a-bf5e-4154-93c9-36265e2b9a3d-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"1d332f3a-bf5e-4154-93c9-36265e2b9a3d\") " pod="openstack/cinder-scheduler-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.533003 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1d332f3a-bf5e-4154-93c9-36265e2b9a3d-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"1d332f3a-bf5e-4154-93c9-36265e2b9a3d\") " pod="openstack/cinder-scheduler-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.533024 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9d84924e-04b9-4541-bf13-c47781cf883f-dns-svc\") pod \"dnsmasq-dns-5cb76f667f-tsrk6\" (UID: \"9d84924e-04b9-4541-bf13-c47781cf883f\") " pod="openstack/dnsmasq-dns-5cb76f667f-tsrk6" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.533040 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tpbzh\" (UniqueName: \"kubernetes.io/projected/9d84924e-04b9-4541-bf13-c47781cf883f-kube-api-access-tpbzh\") pod \"dnsmasq-dns-5cb76f667f-tsrk6\" (UID: \"9d84924e-04b9-4541-bf13-c47781cf883f\") " pod="openstack/dnsmasq-dns-5cb76f667f-tsrk6" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.533060 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1d332f3a-bf5e-4154-93c9-36265e2b9a3d-scripts\") pod \"cinder-scheduler-0\" (UID: \"1d332f3a-bf5e-4154-93c9-36265e2b9a3d\") " pod="openstack/cinder-scheduler-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.533082 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9d84924e-04b9-4541-bf13-c47781cf883f-dns-swift-storage-0\") pod \"dnsmasq-dns-5cb76f667f-tsrk6\" (UID: \"9d84924e-04b9-4541-bf13-c47781cf883f\") " pod="openstack/dnsmasq-dns-5cb76f667f-tsrk6" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.540914 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1d332f3a-bf5e-4154-93c9-36265e2b9a3d-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"1d332f3a-bf5e-4154-93c9-36265e2b9a3d\") " pod="openstack/cinder-scheduler-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.540972 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d332f3a-bf5e-4154-93c9-36265e2b9a3d-config-data\") pod \"cinder-scheduler-0\" (UID: \"1d332f3a-bf5e-4154-93c9-36265e2b9a3d\") " pod="openstack/cinder-scheduler-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.542528 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1d332f3a-bf5e-4154-93c9-36265e2b9a3d-scripts\") pod \"cinder-scheduler-0\" (UID: \"1d332f3a-bf5e-4154-93c9-36265e2b9a3d\") " pod="openstack/cinder-scheduler-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.554175 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1d332f3a-bf5e-4154-93c9-36265e2b9a3d-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"1d332f3a-bf5e-4154-93c9-36265e2b9a3d\") " pod="openstack/cinder-scheduler-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.554800 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d332f3a-bf5e-4154-93c9-36265e2b9a3d-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"1d332f3a-bf5e-4154-93c9-36265e2b9a3d\") " pod="openstack/cinder-scheduler-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.574817 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nqc5m\" (UniqueName: \"kubernetes.io/projected/1d332f3a-bf5e-4154-93c9-36265e2b9a3d-kube-api-access-nqc5m\") pod \"cinder-scheduler-0\" (UID: 
\"1d332f3a-bf5e-4154-93c9-36265e2b9a3d\") " pod="openstack/cinder-scheduler-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.604291 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.606283 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.609503 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.630252 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.641952 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9d84924e-04b9-4541-bf13-c47781cf883f-dns-svc\") pod \"dnsmasq-dns-5cb76f667f-tsrk6\" (UID: \"9d84924e-04b9-4541-bf13-c47781cf883f\") " pod="openstack/dnsmasq-dns-5cb76f667f-tsrk6" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.642109 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tpbzh\" (UniqueName: \"kubernetes.io/projected/9d84924e-04b9-4541-bf13-c47781cf883f-kube-api-access-tpbzh\") pod \"dnsmasq-dns-5cb76f667f-tsrk6\" (UID: \"9d84924e-04b9-4541-bf13-c47781cf883f\") " pod="openstack/dnsmasq-dns-5cb76f667f-tsrk6" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.642212 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9d84924e-04b9-4541-bf13-c47781cf883f-dns-swift-storage-0\") pod \"dnsmasq-dns-5cb76f667f-tsrk6\" (UID: \"9d84924e-04b9-4541-bf13-c47781cf883f\") " pod="openstack/dnsmasq-dns-5cb76f667f-tsrk6" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.642244 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9d84924e-04b9-4541-bf13-c47781cf883f-ovsdbserver-sb\") pod \"dnsmasq-dns-5cb76f667f-tsrk6\" (UID: \"9d84924e-04b9-4541-bf13-c47781cf883f\") " pod="openstack/dnsmasq-dns-5cb76f667f-tsrk6" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.642263 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d84924e-04b9-4541-bf13-c47781cf883f-config\") pod \"dnsmasq-dns-5cb76f667f-tsrk6\" (UID: \"9d84924e-04b9-4541-bf13-c47781cf883f\") " pod="openstack/dnsmasq-dns-5cb76f667f-tsrk6" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.642326 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9d84924e-04b9-4541-bf13-c47781cf883f-ovsdbserver-nb\") pod \"dnsmasq-dns-5cb76f667f-tsrk6\" (UID: \"9d84924e-04b9-4541-bf13-c47781cf883f\") " pod="openstack/dnsmasq-dns-5cb76f667f-tsrk6" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.643236 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9d84924e-04b9-4541-bf13-c47781cf883f-ovsdbserver-nb\") pod \"dnsmasq-dns-5cb76f667f-tsrk6\" (UID: \"9d84924e-04b9-4541-bf13-c47781cf883f\") " pod="openstack/dnsmasq-dns-5cb76f667f-tsrk6" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.643750 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9d84924e-04b9-4541-bf13-c47781cf883f-dns-svc\") pod \"dnsmasq-dns-5cb76f667f-tsrk6\" (UID: \"9d84924e-04b9-4541-bf13-c47781cf883f\") " pod="openstack/dnsmasq-dns-5cb76f667f-tsrk6" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.645344 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9d84924e-04b9-4541-bf13-c47781cf883f-ovsdbserver-sb\") pod \"dnsmasq-dns-5cb76f667f-tsrk6\" (UID: \"9d84924e-04b9-4541-bf13-c47781cf883f\") " pod="openstack/dnsmasq-dns-5cb76f667f-tsrk6" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.646476 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d84924e-04b9-4541-bf13-c47781cf883f-config\") pod \"dnsmasq-dns-5cb76f667f-tsrk6\" (UID: \"9d84924e-04b9-4541-bf13-c47781cf883f\") " pod="openstack/dnsmasq-dns-5cb76f667f-tsrk6" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.647867 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.648952 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9d84924e-04b9-4541-bf13-c47781cf883f-dns-swift-storage-0\") pod \"dnsmasq-dns-5cb76f667f-tsrk6\" (UID: \"9d84924e-04b9-4541-bf13-c47781cf883f\") " pod="openstack/dnsmasq-dns-5cb76f667f-tsrk6" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.661447 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tpbzh\" (UniqueName: \"kubernetes.io/projected/9d84924e-04b9-4541-bf13-c47781cf883f-kube-api-access-tpbzh\") pod \"dnsmasq-dns-5cb76f667f-tsrk6\" (UID: \"9d84924e-04b9-4541-bf13-c47781cf883f\") " pod="openstack/dnsmasq-dns-5cb76f667f-tsrk6" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.746397 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/025b9b80-a0fc-4f59-b25b-d541738c8dfc-config-data\") pod \"cinder-api-0\" (UID: \"025b9b80-a0fc-4f59-b25b-d541738c8dfc\") " pod="openstack/cinder-api-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.746491 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/025b9b80-a0fc-4f59-b25b-d541738c8dfc-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"025b9b80-a0fc-4f59-b25b-d541738c8dfc\") " pod="openstack/cinder-api-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.746527 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/025b9b80-a0fc-4f59-b25b-d541738c8dfc-config-data-custom\") pod \"cinder-api-0\" (UID: \"025b9b80-a0fc-4f59-b25b-d541738c8dfc\") " pod="openstack/cinder-api-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.746601 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/025b9b80-a0fc-4f59-b25b-d541738c8dfc-etc-machine-id\") pod \"cinder-api-0\" (UID: \"025b9b80-a0fc-4f59-b25b-d541738c8dfc\") " pod="openstack/cinder-api-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.746618 4701 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lmplx\" (UniqueName: \"kubernetes.io/projected/025b9b80-a0fc-4f59-b25b-d541738c8dfc-kube-api-access-lmplx\") pod \"cinder-api-0\" (UID: \"025b9b80-a0fc-4f59-b25b-d541738c8dfc\") " pod="openstack/cinder-api-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.746631 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/025b9b80-a0fc-4f59-b25b-d541738c8dfc-scripts\") pod \"cinder-api-0\" (UID: \"025b9b80-a0fc-4f59-b25b-d541738c8dfc\") " pod="openstack/cinder-api-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.746664 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/025b9b80-a0fc-4f59-b25b-d541738c8dfc-logs\") pod \"cinder-api-0\" (UID: \"025b9b80-a0fc-4f59-b25b-d541738c8dfc\") " pod="openstack/cinder-api-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.770071 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5cb76f667f-tsrk6" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.854571 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/025b9b80-a0fc-4f59-b25b-d541738c8dfc-etc-machine-id\") pod \"cinder-api-0\" (UID: \"025b9b80-a0fc-4f59-b25b-d541738c8dfc\") " pod="openstack/cinder-api-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.854624 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lmplx\" (UniqueName: \"kubernetes.io/projected/025b9b80-a0fc-4f59-b25b-d541738c8dfc-kube-api-access-lmplx\") pod \"cinder-api-0\" (UID: \"025b9b80-a0fc-4f59-b25b-d541738c8dfc\") " pod="openstack/cinder-api-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.854653 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/025b9b80-a0fc-4f59-b25b-d541738c8dfc-scripts\") pod \"cinder-api-0\" (UID: \"025b9b80-a0fc-4f59-b25b-d541738c8dfc\") " pod="openstack/cinder-api-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.854700 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/025b9b80-a0fc-4f59-b25b-d541738c8dfc-logs\") pod \"cinder-api-0\" (UID: \"025b9b80-a0fc-4f59-b25b-d541738c8dfc\") " pod="openstack/cinder-api-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.854704 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/025b9b80-a0fc-4f59-b25b-d541738c8dfc-etc-machine-id\") pod \"cinder-api-0\" (UID: \"025b9b80-a0fc-4f59-b25b-d541738c8dfc\") " pod="openstack/cinder-api-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.854735 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/025b9b80-a0fc-4f59-b25b-d541738c8dfc-config-data\") pod \"cinder-api-0\" (UID: \"025b9b80-a0fc-4f59-b25b-d541738c8dfc\") " pod="openstack/cinder-api-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.854833 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/025b9b80-a0fc-4f59-b25b-d541738c8dfc-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"025b9b80-a0fc-4f59-b25b-d541738c8dfc\") " pod="openstack/cinder-api-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.854876 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/025b9b80-a0fc-4f59-b25b-d541738c8dfc-config-data-custom\") pod \"cinder-api-0\" (UID: \"025b9b80-a0fc-4f59-b25b-d541738c8dfc\") " pod="openstack/cinder-api-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.860360 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/025b9b80-a0fc-4f59-b25b-d541738c8dfc-logs\") pod \"cinder-api-0\" (UID: \"025b9b80-a0fc-4f59-b25b-d541738c8dfc\") " pod="openstack/cinder-api-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.863601 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/025b9b80-a0fc-4f59-b25b-d541738c8dfc-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"025b9b80-a0fc-4f59-b25b-d541738c8dfc\") " pod="openstack/cinder-api-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.863650 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/025b9b80-a0fc-4f59-b25b-d541738c8dfc-scripts\") pod \"cinder-api-0\" (UID: \"025b9b80-a0fc-4f59-b25b-d541738c8dfc\") " pod="openstack/cinder-api-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.865333 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/025b9b80-a0fc-4f59-b25b-d541738c8dfc-config-data-custom\") pod \"cinder-api-0\" (UID: \"025b9b80-a0fc-4f59-b25b-d541738c8dfc\") " pod="openstack/cinder-api-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.866220 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/025b9b80-a0fc-4f59-b25b-d541738c8dfc-config-data\") pod \"cinder-api-0\" (UID: \"025b9b80-a0fc-4f59-b25b-d541738c8dfc\") " pod="openstack/cinder-api-0" Nov 21 19:20:24 crc kubenswrapper[4701]: I1121 19:20:24.902346 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lmplx\" (UniqueName: \"kubernetes.io/projected/025b9b80-a0fc-4f59-b25b-d541738c8dfc-kube-api-access-lmplx\") pod \"cinder-api-0\" (UID: \"025b9b80-a0fc-4f59-b25b-d541738c8dfc\") " pod="openstack/cinder-api-0" Nov 21 19:20:25 crc kubenswrapper[4701]: I1121 19:20:25.066376 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8678d9cc8c-7frlc" podUID="2fecc4b4-2cc3-4e48-9db1-93b05843825b" containerName="dnsmasq-dns" containerID="cri-o://5a07422e44cf54dc14452b4da0d7f7ccce323483b5e4117e874acfd3095d88d6" gracePeriod=10 Nov 21 19:20:25 crc kubenswrapper[4701]: I1121 19:20:25.183708 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 21 19:20:25 crc kubenswrapper[4701]: I1121 19:20:25.286984 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 21 19:20:25 crc kubenswrapper[4701]: W1121 19:20:25.363150 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1d332f3a_bf5e_4154_93c9_36265e2b9a3d.slice/crio-79f019768ec73633a9069b464ee024f48a384653746a4c9de5d15fe1f91649d6 WatchSource:0}: Error finding container 79f019768ec73633a9069b464ee024f48a384653746a4c9de5d15fe1f91649d6: Status 404 returned error can't find the container with id 79f019768ec73633a9069b464ee024f48a384653746a4c9de5d15fe1f91649d6 Nov 21 19:20:25 crc kubenswrapper[4701]: I1121 19:20:25.491383 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5cb76f667f-tsrk6"] Nov 21 19:20:25 crc kubenswrapper[4701]: I1121 19:20:25.580358 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5c5f685fb-t5wpk" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.033782 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-sklns" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.035381 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5c5f685fb-t5wpk" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.049866 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8678d9cc8c-7frlc" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.125253 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.141693 4701 generic.go:334] "Generic (PLEG): container finished" podID="8a7a5be4-96a4-4574-9839-2d0576595305" containerID="ee2807df27d43a686f08c63a0b57e104222964e1ac803446173636918d039a8b" exitCode=1 Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.141753 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"8a7a5be4-96a4-4574-9839-2d0576595305","Type":"ContainerDied","Data":"ee2807df27d43a686f08c63a0b57e104222964e1ac803446173636918d039a8b"} Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.141813 4701 scope.go:117] "RemoveContainer" containerID="92827e25281b8993835100f95f59694457273f1eed9546f58a23850bd3b6a025" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.142995 4701 scope.go:117] "RemoveContainer" containerID="ee2807df27d43a686f08c63a0b57e104222964e1ac803446173636918d039a8b" Nov 21 19:20:26 crc kubenswrapper[4701]: E1121 19:20:26.143307 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 10s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(8a7a5be4-96a4-4574-9839-2d0576595305)\"" pod="openstack/watcher-decision-engine-0" podUID="8a7a5be4-96a4-4574-9839-2d0576595305" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.164106 4701 generic.go:334] "Generic (PLEG): container finished" podID="2fecc4b4-2cc3-4e48-9db1-93b05843825b" containerID="5a07422e44cf54dc14452b4da0d7f7ccce323483b5e4117e874acfd3095d88d6" exitCode=0 Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.164761 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/dnsmasq-dns-8678d9cc8c-7frlc" event={"ID":"2fecc4b4-2cc3-4e48-9db1-93b05843825b","Type":"ContainerDied","Data":"5a07422e44cf54dc14452b4da0d7f7ccce323483b5e4117e874acfd3095d88d6"} Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.164800 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8678d9cc8c-7frlc" event={"ID":"2fecc4b4-2cc3-4e48-9db1-93b05843825b","Type":"ContainerDied","Data":"1479a2d8829dbeff9757537861e2539633624b8c51e13561905098301a284f88"} Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.164889 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8678d9cc8c-7frlc" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.208128 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/87272c2c-3166-4a6a-aff9-41278b0b1b51-db-sync-config-data\") pod \"87272c2c-3166-4a6a-aff9-41278b0b1b51\" (UID: \"87272c2c-3166-4a6a-aff9-41278b0b1b51\") " Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.208256 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2fecc4b4-2cc3-4e48-9db1-93b05843825b-config\") pod \"2fecc4b4-2cc3-4e48-9db1-93b05843825b\" (UID: \"2fecc4b4-2cc3-4e48-9db1-93b05843825b\") " Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.208308 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2fecc4b4-2cc3-4e48-9db1-93b05843825b-dns-swift-storage-0\") pod \"2fecc4b4-2cc3-4e48-9db1-93b05843825b\" (UID: \"2fecc4b4-2cc3-4e48-9db1-93b05843825b\") " Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.208405 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87272c2c-3166-4a6a-aff9-41278b0b1b51-combined-ca-bundle\") pod \"87272c2c-3166-4a6a-aff9-41278b0b1b51\" (UID: \"87272c2c-3166-4a6a-aff9-41278b0b1b51\") " Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.208445 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2fecc4b4-2cc3-4e48-9db1-93b05843825b-ovsdbserver-nb\") pod \"2fecc4b4-2cc3-4e48-9db1-93b05843825b\" (UID: \"2fecc4b4-2cc3-4e48-9db1-93b05843825b\") " Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.208534 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2fecc4b4-2cc3-4e48-9db1-93b05843825b-ovsdbserver-sb\") pod \"2fecc4b4-2cc3-4e48-9db1-93b05843825b\" (UID: \"2fecc4b4-2cc3-4e48-9db1-93b05843825b\") " Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.208565 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2fecc4b4-2cc3-4e48-9db1-93b05843825b-dns-svc\") pod \"2fecc4b4-2cc3-4e48-9db1-93b05843825b\" (UID: \"2fecc4b4-2cc3-4e48-9db1-93b05843825b\") " Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.208606 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87272c2c-3166-4a6a-aff9-41278b0b1b51-config-data\") pod \"87272c2c-3166-4a6a-aff9-41278b0b1b51\" (UID: \"87272c2c-3166-4a6a-aff9-41278b0b1b51\") " Nov 21 19:20:26 crc kubenswrapper[4701]: 
I1121 19:20:26.208641 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4vxtq\" (UniqueName: \"kubernetes.io/projected/2fecc4b4-2cc3-4e48-9db1-93b05843825b-kube-api-access-4vxtq\") pod \"2fecc4b4-2cc3-4e48-9db1-93b05843825b\" (UID: \"2fecc4b4-2cc3-4e48-9db1-93b05843825b\") " Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.208701 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kdv6s\" (UniqueName: \"kubernetes.io/projected/87272c2c-3166-4a6a-aff9-41278b0b1b51-kube-api-access-kdv6s\") pod \"87272c2c-3166-4a6a-aff9-41278b0b1b51\" (UID: \"87272c2c-3166-4a6a-aff9-41278b0b1b51\") " Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.233799 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cb76f667f-tsrk6" event={"ID":"9d84924e-04b9-4541-bf13-c47781cf883f","Type":"ContainerStarted","Data":"226cd57a6cfd79181f63e3b2475517c3c33c4b02cd8d3a62b8c34e88c99dec91"} Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.245848 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2fecc4b4-2cc3-4e48-9db1-93b05843825b-kube-api-access-4vxtq" (OuterVolumeSpecName: "kube-api-access-4vxtq") pod "2fecc4b4-2cc3-4e48-9db1-93b05843825b" (UID: "2fecc4b4-2cc3-4e48-9db1-93b05843825b"). InnerVolumeSpecName "kube-api-access-4vxtq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.266248 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87272c2c-3166-4a6a-aff9-41278b0b1b51-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "87272c2c-3166-4a6a-aff9-41278b0b1b51" (UID: "87272c2c-3166-4a6a-aff9-41278b0b1b51"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.266408 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87272c2c-3166-4a6a-aff9-41278b0b1b51-kube-api-access-kdv6s" (OuterVolumeSpecName: "kube-api-access-kdv6s") pod "87272c2c-3166-4a6a-aff9-41278b0b1b51" (UID: "87272c2c-3166-4a6a-aff9-41278b0b1b51"). InnerVolumeSpecName "kube-api-access-kdv6s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.266623 4701 scope.go:117] "RemoveContainer" containerID="5a07422e44cf54dc14452b4da0d7f7ccce323483b5e4117e874acfd3095d88d6" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.293593 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"1d332f3a-bf5e-4154-93c9-36265e2b9a3d","Type":"ContainerStarted","Data":"79f019768ec73633a9069b464ee024f48a384653746a4c9de5d15fe1f91649d6"} Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.310552 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87272c2c-3166-4a6a-aff9-41278b0b1b51-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "87272c2c-3166-4a6a-aff9-41278b0b1b51" (UID: "87272c2c-3166-4a6a-aff9-41278b0b1b51"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.320004 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-api-0" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.320517 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.321661 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-sklns" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.321741 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-sklns" event={"ID":"87272c2c-3166-4a6a-aff9-41278b0b1b51","Type":"ContainerDied","Data":"e418e9aa2674d7ff4bf099fb85d5f3b49d0f5b2120d4c27aaaa5a8cacd7cee7c"} Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.321763 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e418e9aa2674d7ff4bf099fb85d5f3b49d0f5b2120d4c27aaaa5a8cacd7cee7c" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.354087 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87272c2c-3166-4a6a-aff9-41278b0b1b51-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.355669 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4vxtq\" (UniqueName: \"kubernetes.io/projected/2fecc4b4-2cc3-4e48-9db1-93b05843825b-kube-api-access-4vxtq\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.355691 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kdv6s\" (UniqueName: \"kubernetes.io/projected/87272c2c-3166-4a6a-aff9-41278b0b1b51-kube-api-access-kdv6s\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.355702 4701 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/87272c2c-3166-4a6a-aff9-41278b0b1b51-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.399990 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2fecc4b4-2cc3-4e48-9db1-93b05843825b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2fecc4b4-2cc3-4e48-9db1-93b05843825b" (UID: "2fecc4b4-2cc3-4e48-9db1-93b05843825b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.473059 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2fecc4b4-2cc3-4e48-9db1-93b05843825b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2fecc4b4-2cc3-4e48-9db1-93b05843825b" (UID: "2fecc4b4-2cc3-4e48-9db1-93b05843825b"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.473636 4701 scope.go:117] "RemoveContainer" containerID="baced60d4a4193ff5a9cec1723aaa0a47dce1897406515b5d1925ab55c21ee4d" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.488815 4701 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-decision-engine-0" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.488955 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.489008 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.489033 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.490551 4701 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2fecc4b4-2cc3-4e48-9db1-93b05843825b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.490580 4701 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2fecc4b4-2cc3-4e48-9db1-93b05843825b-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.498907 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2fecc4b4-2cc3-4e48-9db1-93b05843825b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2fecc4b4-2cc3-4e48-9db1-93b05843825b" (UID: "2fecc4b4-2cc3-4e48-9db1-93b05843825b"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.498936 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2fecc4b4-2cc3-4e48-9db1-93b05843825b-config" (OuterVolumeSpecName: "config") pod "2fecc4b4-2cc3-4e48-9db1-93b05843825b" (UID: "2fecc4b4-2cc3-4e48-9db1-93b05843825b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.500551 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87272c2c-3166-4a6a-aff9-41278b0b1b51-config-data" (OuterVolumeSpecName: "config-data") pod "87272c2c-3166-4a6a-aff9-41278b0b1b51" (UID: "87272c2c-3166-4a6a-aff9-41278b0b1b51"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:26 crc kubenswrapper[4701]: E1121 19:20:26.523801 4701 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2fecc4b4-2cc3-4e48-9db1-93b05843825b-dns-swift-storage-0 podName:2fecc4b4-2cc3-4e48-9db1-93b05843825b nodeName:}" failed. No retries permitted until 2025-11-21 19:20:27.023745074 +0000 UTC m=+1117.808885101 (durationBeforeRetry 500ms). 
Error: error cleaning subPath mounts for volume "dns-swift-storage-0" (UniqueName: "kubernetes.io/configmap/2fecc4b4-2cc3-4e48-9db1-93b05843825b-dns-swift-storage-0") pod "2fecc4b4-2cc3-4e48-9db1-93b05843825b" (UID: "2fecc4b4-2cc3-4e48-9db1-93b05843825b") : error deleting /var/lib/kubelet/pods/2fecc4b4-2cc3-4e48-9db1-93b05843825b/volume-subpaths: remove /var/lib/kubelet/pods/2fecc4b4-2cc3-4e48-9db1-93b05843825b/volume-subpaths: no such file or directory Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.593608 4701 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2fecc4b4-2cc3-4e48-9db1-93b05843825b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.593647 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87272c2c-3166-4a6a-aff9-41278b0b1b51-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.593666 4701 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2fecc4b4-2cc3-4e48-9db1-93b05843825b-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.644402 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5cb76f667f-tsrk6"] Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.706268 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-679559bbc5-wxbzl"] Nov 21 19:20:26 crc kubenswrapper[4701]: E1121 19:20:26.706831 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87272c2c-3166-4a6a-aff9-41278b0b1b51" containerName="glance-db-sync" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.706845 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="87272c2c-3166-4a6a-aff9-41278b0b1b51" containerName="glance-db-sync" Nov 21 19:20:26 crc kubenswrapper[4701]: E1121 19:20:26.706866 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fecc4b4-2cc3-4e48-9db1-93b05843825b" containerName="dnsmasq-dns" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.706872 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fecc4b4-2cc3-4e48-9db1-93b05843825b" containerName="dnsmasq-dns" Nov 21 19:20:26 crc kubenswrapper[4701]: E1121 19:20:26.706883 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fecc4b4-2cc3-4e48-9db1-93b05843825b" containerName="init" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.706890 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fecc4b4-2cc3-4e48-9db1-93b05843825b" containerName="init" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.707084 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="87272c2c-3166-4a6a-aff9-41278b0b1b51" containerName="glance-db-sync" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.707095 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fecc4b4-2cc3-4e48-9db1-93b05843825b" containerName="dnsmasq-dns" Nov 21 19:20:26 crc kubenswrapper[4701]: I1121 19:20:26.708340 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-679559bbc5-wxbzl" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:26.731017 4701 scope.go:117] "RemoveContainer" containerID="5a07422e44cf54dc14452b4da0d7f7ccce323483b5e4117e874acfd3095d88d6" Nov 21 19:20:27 crc kubenswrapper[4701]: E1121 19:20:26.743905 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5a07422e44cf54dc14452b4da0d7f7ccce323483b5e4117e874acfd3095d88d6\": container with ID starting with 5a07422e44cf54dc14452b4da0d7f7ccce323483b5e4117e874acfd3095d88d6 not found: ID does not exist" containerID="5a07422e44cf54dc14452b4da0d7f7ccce323483b5e4117e874acfd3095d88d6" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:26.743952 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a07422e44cf54dc14452b4da0d7f7ccce323483b5e4117e874acfd3095d88d6"} err="failed to get container status \"5a07422e44cf54dc14452b4da0d7f7ccce323483b5e4117e874acfd3095d88d6\": rpc error: code = NotFound desc = could not find container \"5a07422e44cf54dc14452b4da0d7f7ccce323483b5e4117e874acfd3095d88d6\": container with ID starting with 5a07422e44cf54dc14452b4da0d7f7ccce323483b5e4117e874acfd3095d88d6 not found: ID does not exist" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:26.743981 4701 scope.go:117] "RemoveContainer" containerID="baced60d4a4193ff5a9cec1723aaa0a47dce1897406515b5d1925ab55c21ee4d" Nov 21 19:20:27 crc kubenswrapper[4701]: E1121 19:20:26.766931 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"baced60d4a4193ff5a9cec1723aaa0a47dce1897406515b5d1925ab55c21ee4d\": container with ID starting with baced60d4a4193ff5a9cec1723aaa0a47dce1897406515b5d1925ab55c21ee4d not found: ID does not exist" containerID="baced60d4a4193ff5a9cec1723aaa0a47dce1897406515b5d1925ab55c21ee4d" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:26.766977 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"baced60d4a4193ff5a9cec1723aaa0a47dce1897406515b5d1925ab55c21ee4d"} err="failed to get container status \"baced60d4a4193ff5a9cec1723aaa0a47dce1897406515b5d1925ab55c21ee4d\": rpc error: code = NotFound desc = could not find container \"baced60d4a4193ff5a9cec1723aaa0a47dce1897406515b5d1925ab55c21ee4d\": container with ID starting with baced60d4a4193ff5a9cec1723aaa0a47dce1897406515b5d1925ab55c21ee4d not found: ID does not exist" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:26.798353 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-679559bbc5-wxbzl"] Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:26.800724 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c4234702-4265-4b3a-ab18-9ba8d244ea33-dns-svc\") pod \"dnsmasq-dns-679559bbc5-wxbzl\" (UID: \"c4234702-4265-4b3a-ab18-9ba8d244ea33\") " pod="openstack/dnsmasq-dns-679559bbc5-wxbzl" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:26.800799 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c4234702-4265-4b3a-ab18-9ba8d244ea33-ovsdbserver-nb\") pod \"dnsmasq-dns-679559bbc5-wxbzl\" (UID: \"c4234702-4265-4b3a-ab18-9ba8d244ea33\") " pod="openstack/dnsmasq-dns-679559bbc5-wxbzl" Nov 21 19:20:27 crc kubenswrapper[4701]: 
I1121 19:20:26.800842 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hxmvq\" (UniqueName: \"kubernetes.io/projected/c4234702-4265-4b3a-ab18-9ba8d244ea33-kube-api-access-hxmvq\") pod \"dnsmasq-dns-679559bbc5-wxbzl\" (UID: \"c4234702-4265-4b3a-ab18-9ba8d244ea33\") " pod="openstack/dnsmasq-dns-679559bbc5-wxbzl" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:26.800860 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c4234702-4265-4b3a-ab18-9ba8d244ea33-ovsdbserver-sb\") pod \"dnsmasq-dns-679559bbc5-wxbzl\" (UID: \"c4234702-4265-4b3a-ab18-9ba8d244ea33\") " pod="openstack/dnsmasq-dns-679559bbc5-wxbzl" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:26.800899 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c4234702-4265-4b3a-ab18-9ba8d244ea33-dns-swift-storage-0\") pod \"dnsmasq-dns-679559bbc5-wxbzl\" (UID: \"c4234702-4265-4b3a-ab18-9ba8d244ea33\") " pod="openstack/dnsmasq-dns-679559bbc5-wxbzl" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:26.800939 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c4234702-4265-4b3a-ab18-9ba8d244ea33-config\") pod \"dnsmasq-dns-679559bbc5-wxbzl\" (UID: \"c4234702-4265-4b3a-ab18-9ba8d244ea33\") " pod="openstack/dnsmasq-dns-679559bbc5-wxbzl" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:26.907662 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c4234702-4265-4b3a-ab18-9ba8d244ea33-config\") pod \"dnsmasq-dns-679559bbc5-wxbzl\" (UID: \"c4234702-4265-4b3a-ab18-9ba8d244ea33\") " pod="openstack/dnsmasq-dns-679559bbc5-wxbzl" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:26.907733 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c4234702-4265-4b3a-ab18-9ba8d244ea33-dns-svc\") pod \"dnsmasq-dns-679559bbc5-wxbzl\" (UID: \"c4234702-4265-4b3a-ab18-9ba8d244ea33\") " pod="openstack/dnsmasq-dns-679559bbc5-wxbzl" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:26.913128 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c4234702-4265-4b3a-ab18-9ba8d244ea33-dns-svc\") pod \"dnsmasq-dns-679559bbc5-wxbzl\" (UID: \"c4234702-4265-4b3a-ab18-9ba8d244ea33\") " pod="openstack/dnsmasq-dns-679559bbc5-wxbzl" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:26.913396 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c4234702-4265-4b3a-ab18-9ba8d244ea33-config\") pod \"dnsmasq-dns-679559bbc5-wxbzl\" (UID: \"c4234702-4265-4b3a-ab18-9ba8d244ea33\") " pod="openstack/dnsmasq-dns-679559bbc5-wxbzl" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:26.915793 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c4234702-4265-4b3a-ab18-9ba8d244ea33-ovsdbserver-nb\") pod \"dnsmasq-dns-679559bbc5-wxbzl\" (UID: \"c4234702-4265-4b3a-ab18-9ba8d244ea33\") " pod="openstack/dnsmasq-dns-679559bbc5-wxbzl" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:26.915963 4701 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hxmvq\" (UniqueName: \"kubernetes.io/projected/c4234702-4265-4b3a-ab18-9ba8d244ea33-kube-api-access-hxmvq\") pod \"dnsmasq-dns-679559bbc5-wxbzl\" (UID: \"c4234702-4265-4b3a-ab18-9ba8d244ea33\") " pod="openstack/dnsmasq-dns-679559bbc5-wxbzl" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:26.916005 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c4234702-4265-4b3a-ab18-9ba8d244ea33-ovsdbserver-sb\") pod \"dnsmasq-dns-679559bbc5-wxbzl\" (UID: \"c4234702-4265-4b3a-ab18-9ba8d244ea33\") " pod="openstack/dnsmasq-dns-679559bbc5-wxbzl" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:26.916144 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c4234702-4265-4b3a-ab18-9ba8d244ea33-dns-swift-storage-0\") pod \"dnsmasq-dns-679559bbc5-wxbzl\" (UID: \"c4234702-4265-4b3a-ab18-9ba8d244ea33\") " pod="openstack/dnsmasq-dns-679559bbc5-wxbzl" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:26.917464 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c4234702-4265-4b3a-ab18-9ba8d244ea33-ovsdbserver-nb\") pod \"dnsmasq-dns-679559bbc5-wxbzl\" (UID: \"c4234702-4265-4b3a-ab18-9ba8d244ea33\") " pod="openstack/dnsmasq-dns-679559bbc5-wxbzl" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:26.917842 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c4234702-4265-4b3a-ab18-9ba8d244ea33-ovsdbserver-sb\") pod \"dnsmasq-dns-679559bbc5-wxbzl\" (UID: \"c4234702-4265-4b3a-ab18-9ba8d244ea33\") " pod="openstack/dnsmasq-dns-679559bbc5-wxbzl" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:26.918067 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c4234702-4265-4b3a-ab18-9ba8d244ea33-dns-swift-storage-0\") pod \"dnsmasq-dns-679559bbc5-wxbzl\" (UID: \"c4234702-4265-4b3a-ab18-9ba8d244ea33\") " pod="openstack/dnsmasq-dns-679559bbc5-wxbzl" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:26.955828 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hxmvq\" (UniqueName: \"kubernetes.io/projected/c4234702-4265-4b3a-ab18-9ba8d244ea33-kube-api-access-hxmvq\") pod \"dnsmasq-dns-679559bbc5-wxbzl\" (UID: \"c4234702-4265-4b3a-ab18-9ba8d244ea33\") " pod="openstack/dnsmasq-dns-679559bbc5-wxbzl" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.034401 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2fecc4b4-2cc3-4e48-9db1-93b05843825b-dns-swift-storage-0\") pod \"2fecc4b4-2cc3-4e48-9db1-93b05843825b\" (UID: \"2fecc4b4-2cc3-4e48-9db1-93b05843825b\") " Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.036672 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2fecc4b4-2cc3-4e48-9db1-93b05843825b-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "2fecc4b4-2cc3-4e48-9db1-93b05843825b" (UID: "2fecc4b4-2cc3-4e48-9db1-93b05843825b"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.139763 4701 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2fecc4b4-2cc3-4e48-9db1-93b05843825b-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.334378 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/watcher-api-0" podUID="b909f8f0-603a-420b-8b12-2b15b6c0900e" containerName="watcher-api-log" probeResult="failure" output="Get \"https://10.217.0.173:9322/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.374316 4701 scope.go:117] "RemoveContainer" containerID="ee2807df27d43a686f08c63a0b57e104222964e1ac803446173636918d039a8b" Nov 21 19:20:27 crc kubenswrapper[4701]: E1121 19:20:27.374634 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 10s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(8a7a5be4-96a4-4574-9839-2d0576595305)\"" pod="openstack/watcher-decision-engine-0" podUID="8a7a5be4-96a4-4574-9839-2d0576595305" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.404400 4701 generic.go:334] "Generic (PLEG): container finished" podID="9d84924e-04b9-4541-bf13-c47781cf883f" containerID="cc89b277215fa18a0a438746b00c17cb74473925ee356375bf3ece4761c003a6" exitCode=0 Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.404468 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cb76f667f-tsrk6" event={"ID":"9d84924e-04b9-4541-bf13-c47781cf883f","Type":"ContainerDied","Data":"cc89b277215fa18a0a438746b00c17cb74473925ee356375bf3ece4761c003a6"} Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.416090 4701 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.417117 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"025b9b80-a0fc-4f59-b25b-d541738c8dfc","Type":"ContainerStarted","Data":"735f9a47d8ebea7bf324344e45152e4cdde55a3f9bab40e2a1b1b17c9395d2cd"} Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.450919 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.453225 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.466706 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.467333 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-4fvtn" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.467460 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.473785 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.596755 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4a4641c-4d04-4fc6-957b-d49ddff014ab-config-data\") pod \"glance-default-external-api-0\" (UID: \"d4a4641c-4d04-4fc6-957b-d49ddff014ab\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.596853 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d4a4641c-4d04-4fc6-957b-d49ddff014ab-logs\") pod \"glance-default-external-api-0\" (UID: \"d4a4641c-4d04-4fc6-957b-d49ddff014ab\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.596891 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4a4641c-4d04-4fc6-957b-d49ddff014ab-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"d4a4641c-4d04-4fc6-957b-d49ddff014ab\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.596936 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b6mnh\" (UniqueName: \"kubernetes.io/projected/d4a4641c-4d04-4fc6-957b-d49ddff014ab-kube-api-access-b6mnh\") pod \"glance-default-external-api-0\" (UID: \"d4a4641c-4d04-4fc6-957b-d49ddff014ab\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.596966 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4a4641c-4d04-4fc6-957b-d49ddff014ab-scripts\") pod \"glance-default-external-api-0\" (UID: \"d4a4641c-4d04-4fc6-957b-d49ddff014ab\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.596989 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d4a4641c-4d04-4fc6-957b-d49ddff014ab-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"d4a4641c-4d04-4fc6-957b-d49ddff014ab\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.597235 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"d4a4641c-4d04-4fc6-957b-d49ddff014ab\") " 
pod="openstack/glance-default-external-api-0" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.697775 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.699608 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4a4641c-4d04-4fc6-957b-d49ddff014ab-config-data\") pod \"glance-default-external-api-0\" (UID: \"d4a4641c-4d04-4fc6-957b-d49ddff014ab\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.699667 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d4a4641c-4d04-4fc6-957b-d49ddff014ab-logs\") pod \"glance-default-external-api-0\" (UID: \"d4a4641c-4d04-4fc6-957b-d49ddff014ab\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.699697 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4a4641c-4d04-4fc6-957b-d49ddff014ab-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"d4a4641c-4d04-4fc6-957b-d49ddff014ab\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.699732 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b6mnh\" (UniqueName: \"kubernetes.io/projected/d4a4641c-4d04-4fc6-957b-d49ddff014ab-kube-api-access-b6mnh\") pod \"glance-default-external-api-0\" (UID: \"d4a4641c-4d04-4fc6-957b-d49ddff014ab\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.699752 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4a4641c-4d04-4fc6-957b-d49ddff014ab-scripts\") pod \"glance-default-external-api-0\" (UID: \"d4a4641c-4d04-4fc6-957b-d49ddff014ab\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.699773 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d4a4641c-4d04-4fc6-957b-d49ddff014ab-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"d4a4641c-4d04-4fc6-957b-d49ddff014ab\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.699809 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"d4a4641c-4d04-4fc6-957b-d49ddff014ab\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.700155 4701 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"d4a4641c-4d04-4fc6-957b-d49ddff014ab\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/glance-default-external-api-0" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.709159 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d4a4641c-4d04-4fc6-957b-d49ddff014ab-logs\") pod 
\"glance-default-external-api-0\" (UID: \"d4a4641c-4d04-4fc6-957b-d49ddff014ab\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.712397 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4a4641c-4d04-4fc6-957b-d49ddff014ab-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"d4a4641c-4d04-4fc6-957b-d49ddff014ab\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.713007 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d4a4641c-4d04-4fc6-957b-d49ddff014ab-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"d4a4641c-4d04-4fc6-957b-d49ddff014ab\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.724853 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4a4641c-4d04-4fc6-957b-d49ddff014ab-scripts\") pod \"glance-default-external-api-0\" (UID: \"d4a4641c-4d04-4fc6-957b-d49ddff014ab\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.725179 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4a4641c-4d04-4fc6-957b-d49ddff014ab-config-data\") pod \"glance-default-external-api-0\" (UID: \"d4a4641c-4d04-4fc6-957b-d49ddff014ab\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.762590 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b6mnh\" (UniqueName: \"kubernetes.io/projected/d4a4641c-4d04-4fc6-957b-d49ddff014ab-kube-api-access-b6mnh\") pod \"glance-default-external-api-0\" (UID: \"d4a4641c-4d04-4fc6-957b-d49ddff014ab\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.775678 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-8697b77d4d-84dj8" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.781568 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"d4a4641c-4d04-4fc6-957b-d49ddff014ab\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:27 crc kubenswrapper[4701]: I1121 19:20:27.905821 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-679559bbc5-wxbzl" Nov 21 19:20:28 crc kubenswrapper[4701]: I1121 19:20:28.156493 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 21 19:20:28 crc kubenswrapper[4701]: I1121 19:20:28.188486 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 19:20:28 crc kubenswrapper[4701]: I1121 19:20:28.206372 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 21 19:20:28 crc kubenswrapper[4701]: I1121 19:20:28.210567 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 21 19:20:28 crc kubenswrapper[4701]: I1121 19:20:28.227786 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 19:20:28 crc kubenswrapper[4701]: I1121 19:20:28.304920 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8678d9cc8c-7frlc"] Nov 21 19:20:28 crc kubenswrapper[4701]: I1121 19:20:28.335135 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8678d9cc8c-7frlc"] Nov 21 19:20:28 crc kubenswrapper[4701]: I1121 19:20:28.372980 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/738385cd-0b6b-4141-bcd2-8dc2eeee6eba-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"738385cd-0b6b-4141-bcd2-8dc2eeee6eba\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:28 crc kubenswrapper[4701]: I1121 19:20:28.373068 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/738385cd-0b6b-4141-bcd2-8dc2eeee6eba-logs\") pod \"glance-default-internal-api-0\" (UID: \"738385cd-0b6b-4141-bcd2-8dc2eeee6eba\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:28 crc kubenswrapper[4701]: I1121 19:20:28.373090 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rcvc7\" (UniqueName: \"kubernetes.io/projected/738385cd-0b6b-4141-bcd2-8dc2eeee6eba-kube-api-access-rcvc7\") pod \"glance-default-internal-api-0\" (UID: \"738385cd-0b6b-4141-bcd2-8dc2eeee6eba\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:28 crc kubenswrapper[4701]: I1121 19:20:28.373138 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/738385cd-0b6b-4141-bcd2-8dc2eeee6eba-scripts\") pod \"glance-default-internal-api-0\" (UID: \"738385cd-0b6b-4141-bcd2-8dc2eeee6eba\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:28 crc kubenswrapper[4701]: I1121 19:20:28.373179 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"738385cd-0b6b-4141-bcd2-8dc2eeee6eba\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:28 crc kubenswrapper[4701]: I1121 19:20:28.373220 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/738385cd-0b6b-4141-bcd2-8dc2eeee6eba-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"738385cd-0b6b-4141-bcd2-8dc2eeee6eba\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:28 crc kubenswrapper[4701]: I1121 19:20:28.373254 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/738385cd-0b6b-4141-bcd2-8dc2eeee6eba-config-data\") pod \"glance-default-internal-api-0\" (UID: \"738385cd-0b6b-4141-bcd2-8dc2eeee6eba\") " pod="openstack/glance-default-internal-api-0" 
Nov 21 19:20:28 crc kubenswrapper[4701]: I1121 19:20:28.483917 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/738385cd-0b6b-4141-bcd2-8dc2eeee6eba-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"738385cd-0b6b-4141-bcd2-8dc2eeee6eba\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:28 crc kubenswrapper[4701]: I1121 19:20:28.484020 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/738385cd-0b6b-4141-bcd2-8dc2eeee6eba-logs\") pod \"glance-default-internal-api-0\" (UID: \"738385cd-0b6b-4141-bcd2-8dc2eeee6eba\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:28 crc kubenswrapper[4701]: I1121 19:20:28.484065 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rcvc7\" (UniqueName: \"kubernetes.io/projected/738385cd-0b6b-4141-bcd2-8dc2eeee6eba-kube-api-access-rcvc7\") pod \"glance-default-internal-api-0\" (UID: \"738385cd-0b6b-4141-bcd2-8dc2eeee6eba\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:28 crc kubenswrapper[4701]: I1121 19:20:28.484103 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/738385cd-0b6b-4141-bcd2-8dc2eeee6eba-scripts\") pod \"glance-default-internal-api-0\" (UID: \"738385cd-0b6b-4141-bcd2-8dc2eeee6eba\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:28 crc kubenswrapper[4701]: I1121 19:20:28.484158 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"738385cd-0b6b-4141-bcd2-8dc2eeee6eba\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:28 crc kubenswrapper[4701]: I1121 19:20:28.484185 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/738385cd-0b6b-4141-bcd2-8dc2eeee6eba-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"738385cd-0b6b-4141-bcd2-8dc2eeee6eba\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:28 crc kubenswrapper[4701]: I1121 19:20:28.484236 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/738385cd-0b6b-4141-bcd2-8dc2eeee6eba-config-data\") pod \"glance-default-internal-api-0\" (UID: \"738385cd-0b6b-4141-bcd2-8dc2eeee6eba\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:28 crc kubenswrapper[4701]: I1121 19:20:28.485299 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/738385cd-0b6b-4141-bcd2-8dc2eeee6eba-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"738385cd-0b6b-4141-bcd2-8dc2eeee6eba\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:28 crc kubenswrapper[4701]: I1121 19:20:28.485537 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/738385cd-0b6b-4141-bcd2-8dc2eeee6eba-logs\") pod \"glance-default-internal-api-0\" (UID: \"738385cd-0b6b-4141-bcd2-8dc2eeee6eba\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:28 crc kubenswrapper[4701]: I1121 19:20:28.486052 4701 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume 
\"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"738385cd-0b6b-4141-bcd2-8dc2eeee6eba\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/glance-default-internal-api-0" Nov 21 19:20:28 crc kubenswrapper[4701]: I1121 19:20:28.493976 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"1d332f3a-bf5e-4154-93c9-36265e2b9a3d","Type":"ContainerStarted","Data":"e51410b56f0569c5b450bfe7a37b3dc37d5665b6e6c64734fb75dc8dab0896ef"} Nov 21 19:20:28 crc kubenswrapper[4701]: I1121 19:20:28.494320 4701 scope.go:117] "RemoveContainer" containerID="ee2807df27d43a686f08c63a0b57e104222964e1ac803446173636918d039a8b" Nov 21 19:20:28 crc kubenswrapper[4701]: E1121 19:20:28.494504 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 10s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(8a7a5be4-96a4-4574-9839-2d0576595305)\"" pod="openstack/watcher-decision-engine-0" podUID="8a7a5be4-96a4-4574-9839-2d0576595305" Nov 21 19:20:28 crc kubenswrapper[4701]: I1121 19:20:28.496549 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/738385cd-0b6b-4141-bcd2-8dc2eeee6eba-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"738385cd-0b6b-4141-bcd2-8dc2eeee6eba\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:28 crc kubenswrapper[4701]: I1121 19:20:28.508720 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/738385cd-0b6b-4141-bcd2-8dc2eeee6eba-config-data\") pod \"glance-default-internal-api-0\" (UID: \"738385cd-0b6b-4141-bcd2-8dc2eeee6eba\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:28 crc kubenswrapper[4701]: I1121 19:20:28.525433 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/738385cd-0b6b-4141-bcd2-8dc2eeee6eba-scripts\") pod \"glance-default-internal-api-0\" (UID: \"738385cd-0b6b-4141-bcd2-8dc2eeee6eba\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:28 crc kubenswrapper[4701]: I1121 19:20:28.535845 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rcvc7\" (UniqueName: \"kubernetes.io/projected/738385cd-0b6b-4141-bcd2-8dc2eeee6eba-kube-api-access-rcvc7\") pod \"glance-default-internal-api-0\" (UID: \"738385cd-0b6b-4141-bcd2-8dc2eeee6eba\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:28 crc kubenswrapper[4701]: I1121 19:20:28.601139 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"738385cd-0b6b-4141-bcd2-8dc2eeee6eba\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:28 crc kubenswrapper[4701]: E1121 19:20:28.746477 4701 log.go:32] "CreateContainer in sandbox from runtime service failed" err=< Nov 21 19:20:28 crc kubenswrapper[4701]: rpc error: code = Unknown desc = container create failed: mount `/var/lib/kubelet/pods/9d84924e-04b9-4541-bf13-c47781cf883f/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Nov 21 19:20:28 crc kubenswrapper[4701]: > 
podSandboxID="226cd57a6cfd79181f63e3b2475517c3c33c4b02cd8d3a62b8c34e88c99dec91" Nov 21 19:20:28 crc kubenswrapper[4701]: E1121 19:20:28.746791 4701 kuberuntime_manager.go:1274] "Unhandled Error" err=< Nov 21 19:20:28 crc kubenswrapper[4701]: container &Container{Name:dnsmasq-dns,Image:38.102.83.164:5001/podified-master-centos10/openstack-neutron-server:watcher_latest,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n598h74h59ch8ch64h599hf9hf7h668hdch8ch597h65bh59ch8dh6hc7h86h57fh649h75h586h655h57fh58bh54dh564h5b8h68fh54bh55h56dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-swift-storage-0,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-swift-storage-0,SubPath:dns-swift-storage-0,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/ovsdbserver-nb,SubPath:ovsdbserver-nb,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-sb,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/ovsdbserver-sb,SubPath:ovsdbserver-sb,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tpbzh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-5cb76f667f-tsrk6_openstack(9d84924e-04b9-4541-bf13-c47781cf883f): CreateContainerError: container create failed: mount 
`/var/lib/kubelet/pods/9d84924e-04b9-4541-bf13-c47781cf883f/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Nov 21 19:20:28 crc kubenswrapper[4701]: > logger="UnhandledError" Nov 21 19:20:28 crc kubenswrapper[4701]: E1121 19:20:28.748265 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dnsmasq-dns\" with CreateContainerError: \"container create failed: mount `/var/lib/kubelet/pods/9d84924e-04b9-4541-bf13-c47781cf883f/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory\\n\"" pod="openstack/dnsmasq-dns-5cb76f667f-tsrk6" podUID="9d84924e-04b9-4541-bf13-c47781cf883f" Nov 21 19:20:28 crc kubenswrapper[4701]: I1121 19:20:28.791777 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-679559bbc5-wxbzl"] Nov 21 19:20:28 crc kubenswrapper[4701]: I1121 19:20:28.903729 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 21 19:20:29 crc kubenswrapper[4701]: I1121 19:20:29.177221 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 19:20:29 crc kubenswrapper[4701]: I1121 19:20:29.538816 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-679559bbc5-wxbzl" event={"ID":"c4234702-4265-4b3a-ab18-9ba8d244ea33","Type":"ContainerStarted","Data":"e8c70a12416b5309e85170969e76782e13f1d635e04f443ff47c22f173758277"} Nov 21 19:20:29 crc kubenswrapper[4701]: I1121 19:20:29.570973 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d4a4641c-4d04-4fc6-957b-d49ddff014ab","Type":"ContainerStarted","Data":"b159845f6d1f978ff6653ef4d383e54bd33ec888bf91041bd8a3b7a122c999ca"} Nov 21 19:20:29 crc kubenswrapper[4701]: I1121 19:20:29.580593 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 21 19:20:29 crc kubenswrapper[4701]: I1121 19:20:29.594932 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"025b9b80-a0fc-4f59-b25b-d541738c8dfc","Type":"ContainerStarted","Data":"8dca0052c45fdcff2fbb90fa10fa06ea3dc235c51395308f5f61e5425b9c36d8"} Nov 21 19:20:29 crc kubenswrapper[4701]: I1121 19:20:29.941594 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 19:20:30 crc kubenswrapper[4701]: I1121 19:20:30.008851 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2fecc4b4-2cc3-4e48-9db1-93b05843825b" path="/var/lib/kubelet/pods/2fecc4b4-2cc3-4e48-9db1-93b05843825b/volumes" Nov 21 19:20:30 crc kubenswrapper[4701]: I1121 19:20:30.296170 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5cb76f667f-tsrk6" Nov 21 19:20:30 crc kubenswrapper[4701]: I1121 19:20:30.377763 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tpbzh\" (UniqueName: \"kubernetes.io/projected/9d84924e-04b9-4541-bf13-c47781cf883f-kube-api-access-tpbzh\") pod \"9d84924e-04b9-4541-bf13-c47781cf883f\" (UID: \"9d84924e-04b9-4541-bf13-c47781cf883f\") " Nov 21 19:20:30 crc kubenswrapper[4701]: I1121 19:20:30.378075 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9d84924e-04b9-4541-bf13-c47781cf883f-dns-swift-storage-0\") pod \"9d84924e-04b9-4541-bf13-c47781cf883f\" (UID: \"9d84924e-04b9-4541-bf13-c47781cf883f\") " Nov 21 19:20:30 crc kubenswrapper[4701]: I1121 19:20:30.378226 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9d84924e-04b9-4541-bf13-c47781cf883f-ovsdbserver-sb\") pod \"9d84924e-04b9-4541-bf13-c47781cf883f\" (UID: \"9d84924e-04b9-4541-bf13-c47781cf883f\") " Nov 21 19:20:30 crc kubenswrapper[4701]: I1121 19:20:30.378389 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9d84924e-04b9-4541-bf13-c47781cf883f-dns-svc\") pod \"9d84924e-04b9-4541-bf13-c47781cf883f\" (UID: \"9d84924e-04b9-4541-bf13-c47781cf883f\") " Nov 21 19:20:30 crc kubenswrapper[4701]: I1121 19:20:30.378453 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9d84924e-04b9-4541-bf13-c47781cf883f-ovsdbserver-nb\") pod \"9d84924e-04b9-4541-bf13-c47781cf883f\" (UID: \"9d84924e-04b9-4541-bf13-c47781cf883f\") " Nov 21 19:20:30 crc kubenswrapper[4701]: I1121 19:20:30.378492 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d84924e-04b9-4541-bf13-c47781cf883f-config\") pod \"9d84924e-04b9-4541-bf13-c47781cf883f\" (UID: \"9d84924e-04b9-4541-bf13-c47781cf883f\") " Nov 21 19:20:30 crc kubenswrapper[4701]: I1121 19:20:30.428545 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d84924e-04b9-4541-bf13-c47781cf883f-kube-api-access-tpbzh" (OuterVolumeSpecName: "kube-api-access-tpbzh") pod "9d84924e-04b9-4541-bf13-c47781cf883f" (UID: "9d84924e-04b9-4541-bf13-c47781cf883f"). InnerVolumeSpecName "kube-api-access-tpbzh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:20:30 crc kubenswrapper[4701]: I1121 19:20:30.500724 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tpbzh\" (UniqueName: \"kubernetes.io/projected/9d84924e-04b9-4541-bf13-c47781cf883f-kube-api-access-tpbzh\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:30 crc kubenswrapper[4701]: I1121 19:20:30.594104 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d84924e-04b9-4541-bf13-c47781cf883f-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "9d84924e-04b9-4541-bf13-c47781cf883f" (UID: "9d84924e-04b9-4541-bf13-c47781cf883f"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:20:30 crc kubenswrapper[4701]: I1121 19:20:30.602651 4701 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9d84924e-04b9-4541-bf13-c47781cf883f-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:30 crc kubenswrapper[4701]: I1121 19:20:30.619065 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d84924e-04b9-4541-bf13-c47781cf883f-config" (OuterVolumeSpecName: "config") pod "9d84924e-04b9-4541-bf13-c47781cf883f" (UID: "9d84924e-04b9-4541-bf13-c47781cf883f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:20:30 crc kubenswrapper[4701]: I1121 19:20:30.674447 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cb76f667f-tsrk6" event={"ID":"9d84924e-04b9-4541-bf13-c47781cf883f","Type":"ContainerDied","Data":"226cd57a6cfd79181f63e3b2475517c3c33c4b02cd8d3a62b8c34e88c99dec91"} Nov 21 19:20:30 crc kubenswrapper[4701]: I1121 19:20:30.674722 4701 scope.go:117] "RemoveContainer" containerID="cc89b277215fa18a0a438746b00c17cb74473925ee356375bf3ece4761c003a6" Nov 21 19:20:30 crc kubenswrapper[4701]: I1121 19:20:30.675001 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5cb76f667f-tsrk6" Nov 21 19:20:30 crc kubenswrapper[4701]: I1121 19:20:30.695377 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d84924e-04b9-4541-bf13-c47781cf883f-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "9d84924e-04b9-4541-bf13-c47781cf883f" (UID: "9d84924e-04b9-4541-bf13-c47781cf883f"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:20:30 crc kubenswrapper[4701]: I1121 19:20:30.707362 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d84924e-04b9-4541-bf13-c47781cf883f-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "9d84924e-04b9-4541-bf13-c47781cf883f" (UID: "9d84924e-04b9-4541-bf13-c47781cf883f"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:20:30 crc kubenswrapper[4701]: I1121 19:20:30.716584 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"738385cd-0b6b-4141-bcd2-8dc2eeee6eba","Type":"ContainerStarted","Data":"970d96a6567417467804a68bceec83169fdead323c1c450b69e66fdd8297bb50"} Nov 21 19:20:30 crc kubenswrapper[4701]: I1121 19:20:30.720364 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-5c5f685fb-t5wpk" podUID="f658fc90-2c53-4cdd-b411-16ccb58f7625" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.172:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 21 19:20:30 crc kubenswrapper[4701]: I1121 19:20:30.720394 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-869574dbc6-l96tx" podUID="1c543587-173c-4fb2-b730-72b848f845d6" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.158:8443/dashboard/auth/login/?next=/dashboard/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 21 19:20:30 crc kubenswrapper[4701]: I1121 19:20:30.721929 4701 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9d84924e-04b9-4541-bf13-c47781cf883f-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:30 crc kubenswrapper[4701]: I1121 19:20:30.721954 4701 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d84924e-04b9-4541-bf13-c47781cf883f-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:30 crc kubenswrapper[4701]: I1121 19:20:30.721964 4701 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9d84924e-04b9-4541-bf13-c47781cf883f-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:30 crc kubenswrapper[4701]: I1121 19:20:30.758768 4701 generic.go:334] "Generic (PLEG): container finished" podID="c4234702-4265-4b3a-ab18-9ba8d244ea33" containerID="55e6ab3dcb0ef36e836fd94d76a324c64c2b2a989d0e9044abb1fe814f2ee7ff" exitCode=0 Nov 21 19:20:30 crc kubenswrapper[4701]: I1121 19:20:30.758817 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-679559bbc5-wxbzl" event={"ID":"c4234702-4265-4b3a-ab18-9ba8d244ea33","Type":"ContainerDied","Data":"55e6ab3dcb0ef36e836fd94d76a324c64c2b2a989d0e9044abb1fe814f2ee7ff"} Nov 21 19:20:30 crc kubenswrapper[4701]: I1121 19:20:30.778895 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d84924e-04b9-4541-bf13-c47781cf883f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9d84924e-04b9-4541-bf13-c47781cf883f" (UID: "9d84924e-04b9-4541-bf13-c47781cf883f"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:20:30 crc kubenswrapper[4701]: I1121 19:20:30.791338 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-6c68b8ff68-tfcgs" podUID="7d8b1846-dcd5-49b4-8eb2-74b0462538e1" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.159:8443/dashboard/auth/login/?next=/dashboard/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 21 19:20:30 crc kubenswrapper[4701]: I1121 19:20:30.826375 4701 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9d84924e-04b9-4541-bf13-c47781cf883f-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:31 crc kubenswrapper[4701]: I1121 19:20:31.168234 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-5b76b98545-tv4h2" Nov 21 19:20:31 crc kubenswrapper[4701]: I1121 19:20:31.178442 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 19:20:31 crc kubenswrapper[4701]: I1121 19:20:31.237675 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5cb76f667f-tsrk6"] Nov 21 19:20:31 crc kubenswrapper[4701]: I1121 19:20:31.270878 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5cb76f667f-tsrk6"] Nov 21 19:20:31 crc kubenswrapper[4701]: I1121 19:20:31.307328 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-8697b77d4d-84dj8"] Nov 21 19:20:31 crc kubenswrapper[4701]: I1121 19:20:31.307804 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-8697b77d4d-84dj8" podUID="6084dcd4-0556-4a7d-b880-1979a9c36609" containerName="neutron-api" containerID="cri-o://daeb86b04da5d06629353e1d5951a542bbc5b3a3e4a0797d5922b2a056b643bb" gracePeriod=30 Nov 21 19:20:31 crc kubenswrapper[4701]: I1121 19:20:31.309545 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-8697b77d4d-84dj8" podUID="6084dcd4-0556-4a7d-b880-1979a9c36609" containerName="neutron-httpd" containerID="cri-o://72f3c9d31797acd8881ba733353698dc04c69993e3a991fd1273ac0a40cae63e" gracePeriod=30 Nov 21 19:20:31 crc kubenswrapper[4701]: I1121 19:20:31.416891 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 19:20:31 crc kubenswrapper[4701]: E1121 19:20:31.582916 4701 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9d84924e_04b9_4541_bf13_c47781cf883f.slice/crio-226cd57a6cfd79181f63e3b2475517c3c33c4b02cd8d3a62b8c34e88c99dec91\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9d84924e_04b9_4541_bf13_c47781cf883f.slice\": RecentStats: unable to find data in memory cache]" Nov 21 19:20:31 crc kubenswrapper[4701]: I1121 19:20:31.776544 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-796cb85bf8-h88pn" podUID="59b306d9-cacf-4e38-b19f-60f8ebe026a7" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.174:9311/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 21 19:20:31 crc kubenswrapper[4701]: I1121 19:20:31.777352 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/barbican-api-796cb85bf8-h88pn" Nov 21 19:20:31 crc kubenswrapper[4701]: I1121 19:20:31.830556 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"025b9b80-a0fc-4f59-b25b-d541738c8dfc","Type":"ContainerStarted","Data":"3b60c6eea761331e4a9cda0780211b6fe8bd85b1a6f04811e2ec82a1b5b3b868"} Nov 21 19:20:31 crc kubenswrapper[4701]: I1121 19:20:31.833616 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="025b9b80-a0fc-4f59-b25b-d541738c8dfc" containerName="cinder-api-log" containerID="cri-o://8dca0052c45fdcff2fbb90fa10fa06ea3dc235c51395308f5f61e5425b9c36d8" gracePeriod=30 Nov 21 19:20:31 crc kubenswrapper[4701]: I1121 19:20:31.834142 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="025b9b80-a0fc-4f59-b25b-d541738c8dfc" containerName="cinder-api" containerID="cri-o://3b60c6eea761331e4a9cda0780211b6fe8bd85b1a6f04811e2ec82a1b5b3b868" gracePeriod=30 Nov 21 19:20:31 crc kubenswrapper[4701]: I1121 19:20:31.852119 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"1d332f3a-bf5e-4154-93c9-36265e2b9a3d","Type":"ContainerStarted","Data":"5f67ebe31c816b2e6c4f0b01d62f9dca52b2c50cdd8ce57afa301e1a23e2e203"} Nov 21 19:20:31 crc kubenswrapper[4701]: I1121 19:20:31.902080 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"738385cd-0b6b-4141-bcd2-8dc2eeee6eba","Type":"ContainerStarted","Data":"817fbb1e51dcbb6f2f6273a199e56eeb027e0458ec9f3195dae9373709451a51"} Nov 21 19:20:31 crc kubenswrapper[4701]: I1121 19:20:31.930648 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=7.930627271 podStartE2EDuration="7.930627271s" podCreationTimestamp="2025-11-21 19:20:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:20:31.879551982 +0000 UTC m=+1122.664692029" watchObservedRunningTime="2025-11-21 19:20:31.930627271 +0000 UTC m=+1122.715767298" Nov 21 19:20:31 crc kubenswrapper[4701]: I1121 19:20:31.970057 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=7.60605408 podStartE2EDuration="7.970031818s" podCreationTimestamp="2025-11-21 19:20:24 +0000 UTC" firstStartedPulling="2025-11-21 19:20:25.373431084 +0000 UTC m=+1116.158571111" lastFinishedPulling="2025-11-21 19:20:25.737408822 +0000 UTC m=+1116.522548849" observedRunningTime="2025-11-21 19:20:31.918058325 +0000 UTC m=+1122.703198342" watchObservedRunningTime="2025-11-21 19:20:31.970031818 +0000 UTC m=+1122.755171845" Nov 21 19:20:31 crc kubenswrapper[4701]: I1121 19:20:31.993858 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d84924e-04b9-4541-bf13-c47781cf883f" path="/var/lib/kubelet/pods/9d84924e-04b9-4541-bf13-c47781cf883f/volumes" Nov 21 19:20:31 crc kubenswrapper[4701]: I1121 19:20:31.994732 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d4a4641c-4d04-4fc6-957b-d49ddff014ab","Type":"ContainerStarted","Data":"46c216712e46618ea324d7bce516d277095ab73c90d97752255283900fec2829"} Nov 21 19:20:31 crc kubenswrapper[4701]: I1121 19:20:31.995286 4701 generic.go:334] "Generic (PLEG): container finished" podID="6084dcd4-0556-4a7d-b880-1979a9c36609" 
containerID="72f3c9d31797acd8881ba733353698dc04c69993e3a991fd1273ac0a40cae63e" exitCode=0 Nov 21 19:20:31 crc kubenswrapper[4701]: I1121 19:20:31.996578 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8697b77d4d-84dj8" event={"ID":"6084dcd4-0556-4a7d-b880-1979a9c36609","Type":"ContainerDied","Data":"72f3c9d31797acd8881ba733353698dc04c69993e3a991fd1273ac0a40cae63e"} Nov 21 19:20:32 crc kubenswrapper[4701]: I1121 19:20:32.374162 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-796cb85bf8-h88pn" Nov 21 19:20:32 crc kubenswrapper[4701]: I1121 19:20:32.507415 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-5c5f685fb-t5wpk"] Nov 21 19:20:32 crc kubenswrapper[4701]: I1121 19:20:32.507755 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-5c5f685fb-t5wpk" podUID="f658fc90-2c53-4cdd-b411-16ccb58f7625" containerName="barbican-api-log" containerID="cri-o://8654f42a09f9d1570c25cfce1f5f3c459a60f7a17f9a2a9763183d04aaad1230" gracePeriod=30 Nov 21 19:20:32 crc kubenswrapper[4701]: I1121 19:20:32.508177 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-5c5f685fb-t5wpk" podUID="f658fc90-2c53-4cdd-b411-16ccb58f7625" containerName="barbican-api" containerID="cri-o://d0c41d2af7ff5fbb1537a95f6b77d61a9ea9f0442bd7f7feface68c45bd96eeb" gracePeriod=30 Nov 21 19:20:33 crc kubenswrapper[4701]: I1121 19:20:33.071705 4701 generic.go:334] "Generic (PLEG): container finished" podID="025b9b80-a0fc-4f59-b25b-d541738c8dfc" containerID="8dca0052c45fdcff2fbb90fa10fa06ea3dc235c51395308f5f61e5425b9c36d8" exitCode=143 Nov 21 19:20:33 crc kubenswrapper[4701]: I1121 19:20:33.072239 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"025b9b80-a0fc-4f59-b25b-d541738c8dfc","Type":"ContainerDied","Data":"8dca0052c45fdcff2fbb90fa10fa06ea3dc235c51395308f5f61e5425b9c36d8"} Nov 21 19:20:33 crc kubenswrapper[4701]: I1121 19:20:33.101768 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"738385cd-0b6b-4141-bcd2-8dc2eeee6eba","Type":"ContainerStarted","Data":"a054ec48ea542051afefd9a64e11312c792f753bfa84389bcbbbf6516ddde0a3"} Nov 21 19:20:33 crc kubenswrapper[4701]: I1121 19:20:33.101762 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="738385cd-0b6b-4141-bcd2-8dc2eeee6eba" containerName="glance-log" containerID="cri-o://817fbb1e51dcbb6f2f6273a199e56eeb027e0458ec9f3195dae9373709451a51" gracePeriod=30 Nov 21 19:20:33 crc kubenswrapper[4701]: I1121 19:20:33.101914 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="738385cd-0b6b-4141-bcd2-8dc2eeee6eba" containerName="glance-httpd" containerID="cri-o://a054ec48ea542051afefd9a64e11312c792f753bfa84389bcbbbf6516ddde0a3" gracePeriod=30 Nov 21 19:20:33 crc kubenswrapper[4701]: I1121 19:20:33.128515 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=7.128489826 podStartE2EDuration="7.128489826s" podCreationTimestamp="2025-11-21 19:20:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:20:33.122080374 +0000 UTC m=+1123.907220391" 
watchObservedRunningTime="2025-11-21 19:20:33.128489826 +0000 UTC m=+1123.913629843" Nov 21 19:20:33 crc kubenswrapper[4701]: I1121 19:20:33.156168 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-679559bbc5-wxbzl" event={"ID":"c4234702-4265-4b3a-ab18-9ba8d244ea33","Type":"ContainerStarted","Data":"c9e99e980d9315c400d62de2d04b8c8d3eb4e81d8e2179a33e5b4c9591156de5"} Nov 21 19:20:33 crc kubenswrapper[4701]: I1121 19:20:33.158379 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-679559bbc5-wxbzl" Nov 21 19:20:33 crc kubenswrapper[4701]: I1121 19:20:33.176088 4701 generic.go:334] "Generic (PLEG): container finished" podID="f658fc90-2c53-4cdd-b411-16ccb58f7625" containerID="8654f42a09f9d1570c25cfce1f5f3c459a60f7a17f9a2a9763183d04aaad1230" exitCode=143 Nov 21 19:20:33 crc kubenswrapper[4701]: I1121 19:20:33.178322 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5c5f685fb-t5wpk" event={"ID":"f658fc90-2c53-4cdd-b411-16ccb58f7625","Type":"ContainerDied","Data":"8654f42a09f9d1570c25cfce1f5f3c459a60f7a17f9a2a9763183d04aaad1230"} Nov 21 19:20:33 crc kubenswrapper[4701]: I1121 19:20:33.206711 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-679559bbc5-wxbzl" podStartSLOduration=7.206690163 podStartE2EDuration="7.206690163s" podCreationTimestamp="2025-11-21 19:20:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:20:33.186054459 +0000 UTC m=+1123.971194486" watchObservedRunningTime="2025-11-21 19:20:33.206690163 +0000 UTC m=+1123.991830190" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.161755 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.199566 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d4a4641c-4d04-4fc6-957b-d49ddff014ab","Type":"ContainerStarted","Data":"d59a92b68d2e6bb62ceb2c4452587ec22c0b06e3bc48ff3a4b4000af7d6ca61a"} Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.199777 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="d4a4641c-4d04-4fc6-957b-d49ddff014ab" containerName="glance-log" containerID="cri-o://46c216712e46618ea324d7bce516d277095ab73c90d97752255283900fec2829" gracePeriod=30 Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.199900 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="d4a4641c-4d04-4fc6-957b-d49ddff014ab" containerName="glance-httpd" containerID="cri-o://d59a92b68d2e6bb62ceb2c4452587ec22c0b06e3bc48ff3a4b4000af7d6ca61a" gracePeriod=30 Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.229470 4701 generic.go:334] "Generic (PLEG): container finished" podID="738385cd-0b6b-4141-bcd2-8dc2eeee6eba" containerID="a054ec48ea542051afefd9a64e11312c792f753bfa84389bcbbbf6516ddde0a3" exitCode=143 Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.229882 4701 generic.go:334] "Generic (PLEG): container finished" podID="738385cd-0b6b-4141-bcd2-8dc2eeee6eba" containerID="817fbb1e51dcbb6f2f6273a199e56eeb027e0458ec9f3195dae9373709451a51" exitCode=143 Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.230889 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.231218 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"738385cd-0b6b-4141-bcd2-8dc2eeee6eba","Type":"ContainerDied","Data":"a054ec48ea542051afefd9a64e11312c792f753bfa84389bcbbbf6516ddde0a3"} Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.231342 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"738385cd-0b6b-4141-bcd2-8dc2eeee6eba","Type":"ContainerDied","Data":"817fbb1e51dcbb6f2f6273a199e56eeb027e0458ec9f3195dae9373709451a51"} Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.231426 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"738385cd-0b6b-4141-bcd2-8dc2eeee6eba","Type":"ContainerDied","Data":"970d96a6567417467804a68bceec83169fdead323c1c450b69e66fdd8297bb50"} Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.231536 4701 scope.go:117] "RemoveContainer" containerID="a054ec48ea542051afefd9a64e11312c792f753bfa84389bcbbbf6516ddde0a3" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.254306 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=8.254289529 podStartE2EDuration="8.254289529s" podCreationTimestamp="2025-11-21 19:20:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:20:34.252543392 +0000 UTC m=+1125.037683419" watchObservedRunningTime="2025-11-21 19:20:34.254289529 +0000 UTC m=+1125.039429556" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 
19:20:34.274848 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/738385cd-0b6b-4141-bcd2-8dc2eeee6eba-logs\") pod \"738385cd-0b6b-4141-bcd2-8dc2eeee6eba\" (UID: \"738385cd-0b6b-4141-bcd2-8dc2eeee6eba\") " Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.274911 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rcvc7\" (UniqueName: \"kubernetes.io/projected/738385cd-0b6b-4141-bcd2-8dc2eeee6eba-kube-api-access-rcvc7\") pod \"738385cd-0b6b-4141-bcd2-8dc2eeee6eba\" (UID: \"738385cd-0b6b-4141-bcd2-8dc2eeee6eba\") " Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.274990 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/738385cd-0b6b-4141-bcd2-8dc2eeee6eba-config-data\") pod \"738385cd-0b6b-4141-bcd2-8dc2eeee6eba\" (UID: \"738385cd-0b6b-4141-bcd2-8dc2eeee6eba\") " Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.275050 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"738385cd-0b6b-4141-bcd2-8dc2eeee6eba\" (UID: \"738385cd-0b6b-4141-bcd2-8dc2eeee6eba\") " Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.275133 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/738385cd-0b6b-4141-bcd2-8dc2eeee6eba-httpd-run\") pod \"738385cd-0b6b-4141-bcd2-8dc2eeee6eba\" (UID: \"738385cd-0b6b-4141-bcd2-8dc2eeee6eba\") " Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.275157 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/738385cd-0b6b-4141-bcd2-8dc2eeee6eba-scripts\") pod \"738385cd-0b6b-4141-bcd2-8dc2eeee6eba\" (UID: \"738385cd-0b6b-4141-bcd2-8dc2eeee6eba\") " Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.275300 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/738385cd-0b6b-4141-bcd2-8dc2eeee6eba-combined-ca-bundle\") pod \"738385cd-0b6b-4141-bcd2-8dc2eeee6eba\" (UID: \"738385cd-0b6b-4141-bcd2-8dc2eeee6eba\") " Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.276647 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/738385cd-0b6b-4141-bcd2-8dc2eeee6eba-logs" (OuterVolumeSpecName: "logs") pod "738385cd-0b6b-4141-bcd2-8dc2eeee6eba" (UID: "738385cd-0b6b-4141-bcd2-8dc2eeee6eba"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.276844 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/738385cd-0b6b-4141-bcd2-8dc2eeee6eba-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "738385cd-0b6b-4141-bcd2-8dc2eeee6eba" (UID: "738385cd-0b6b-4141-bcd2-8dc2eeee6eba"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.290435 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "glance") pod "738385cd-0b6b-4141-bcd2-8dc2eeee6eba" (UID: "738385cd-0b6b-4141-bcd2-8dc2eeee6eba"). InnerVolumeSpecName "local-storage05-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.298485 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/738385cd-0b6b-4141-bcd2-8dc2eeee6eba-kube-api-access-rcvc7" (OuterVolumeSpecName: "kube-api-access-rcvc7") pod "738385cd-0b6b-4141-bcd2-8dc2eeee6eba" (UID: "738385cd-0b6b-4141-bcd2-8dc2eeee6eba"). InnerVolumeSpecName "kube-api-access-rcvc7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.305457 4701 scope.go:117] "RemoveContainer" containerID="817fbb1e51dcbb6f2f6273a199e56eeb027e0458ec9f3195dae9373709451a51" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.313712 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/738385cd-0b6b-4141-bcd2-8dc2eeee6eba-scripts" (OuterVolumeSpecName: "scripts") pod "738385cd-0b6b-4141-bcd2-8dc2eeee6eba" (UID: "738385cd-0b6b-4141-bcd2-8dc2eeee6eba"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.363450 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/738385cd-0b6b-4141-bcd2-8dc2eeee6eba-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "738385cd-0b6b-4141-bcd2-8dc2eeee6eba" (UID: "738385cd-0b6b-4141-bcd2-8dc2eeee6eba"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.381705 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rcvc7\" (UniqueName: \"kubernetes.io/projected/738385cd-0b6b-4141-bcd2-8dc2eeee6eba-kube-api-access-rcvc7\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.381752 4701 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.381763 4701 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/738385cd-0b6b-4141-bcd2-8dc2eeee6eba-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.381771 4701 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/738385cd-0b6b-4141-bcd2-8dc2eeee6eba-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.381782 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/738385cd-0b6b-4141-bcd2-8dc2eeee6eba-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.381790 4701 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/738385cd-0b6b-4141-bcd2-8dc2eeee6eba-logs\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.491482 4701 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.504578 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/738385cd-0b6b-4141-bcd2-8dc2eeee6eba-config-data" (OuterVolumeSpecName: "config-data") pod "738385cd-0b6b-4141-bcd2-8dc2eeee6eba" (UID: "738385cd-0b6b-4141-bcd2-8dc2eeee6eba"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.519512 4701 scope.go:117] "RemoveContainer" containerID="a054ec48ea542051afefd9a64e11312c792f753bfa84389bcbbbf6516ddde0a3" Nov 21 19:20:34 crc kubenswrapper[4701]: E1121 19:20:34.520673 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a054ec48ea542051afefd9a64e11312c792f753bfa84389bcbbbf6516ddde0a3\": container with ID starting with a054ec48ea542051afefd9a64e11312c792f753bfa84389bcbbbf6516ddde0a3 not found: ID does not exist" containerID="a054ec48ea542051afefd9a64e11312c792f753bfa84389bcbbbf6516ddde0a3" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.520765 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a054ec48ea542051afefd9a64e11312c792f753bfa84389bcbbbf6516ddde0a3"} err="failed to get container status \"a054ec48ea542051afefd9a64e11312c792f753bfa84389bcbbbf6516ddde0a3\": rpc error: code = NotFound desc = could not find container \"a054ec48ea542051afefd9a64e11312c792f753bfa84389bcbbbf6516ddde0a3\": container with ID starting with a054ec48ea542051afefd9a64e11312c792f753bfa84389bcbbbf6516ddde0a3 not found: ID does not exist" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.520927 4701 scope.go:117] "RemoveContainer" containerID="817fbb1e51dcbb6f2f6273a199e56eeb027e0458ec9f3195dae9373709451a51" Nov 21 19:20:34 crc kubenswrapper[4701]: E1121 19:20:34.521399 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"817fbb1e51dcbb6f2f6273a199e56eeb027e0458ec9f3195dae9373709451a51\": container with ID starting with 817fbb1e51dcbb6f2f6273a199e56eeb027e0458ec9f3195dae9373709451a51 not found: ID does not exist" containerID="817fbb1e51dcbb6f2f6273a199e56eeb027e0458ec9f3195dae9373709451a51" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.521471 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"817fbb1e51dcbb6f2f6273a199e56eeb027e0458ec9f3195dae9373709451a51"} err="failed to get container status \"817fbb1e51dcbb6f2f6273a199e56eeb027e0458ec9f3195dae9373709451a51\": rpc error: code = NotFound desc = could not find container \"817fbb1e51dcbb6f2f6273a199e56eeb027e0458ec9f3195dae9373709451a51\": container with ID starting with 817fbb1e51dcbb6f2f6273a199e56eeb027e0458ec9f3195dae9373709451a51 not found: ID does not exist" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.521542 4701 scope.go:117] "RemoveContainer" containerID="a054ec48ea542051afefd9a64e11312c792f753bfa84389bcbbbf6516ddde0a3" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.521820 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a054ec48ea542051afefd9a64e11312c792f753bfa84389bcbbbf6516ddde0a3"} err="failed to get container status \"a054ec48ea542051afefd9a64e11312c792f753bfa84389bcbbbf6516ddde0a3\": rpc error: code = NotFound desc = could not find container \"a054ec48ea542051afefd9a64e11312c792f753bfa84389bcbbbf6516ddde0a3\": container with ID starting with a054ec48ea542051afefd9a64e11312c792f753bfa84389bcbbbf6516ddde0a3 not found: ID does not exist" Nov 21 19:20:34 crc 
kubenswrapper[4701]: I1121 19:20:34.521895 4701 scope.go:117] "RemoveContainer" containerID="817fbb1e51dcbb6f2f6273a199e56eeb027e0458ec9f3195dae9373709451a51" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.522194 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"817fbb1e51dcbb6f2f6273a199e56eeb027e0458ec9f3195dae9373709451a51"} err="failed to get container status \"817fbb1e51dcbb6f2f6273a199e56eeb027e0458ec9f3195dae9373709451a51\": rpc error: code = NotFound desc = could not find container \"817fbb1e51dcbb6f2f6273a199e56eeb027e0458ec9f3195dae9373709451a51\": container with ID starting with 817fbb1e51dcbb6f2f6273a199e56eeb027e0458ec9f3195dae9373709451a51 not found: ID does not exist" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.582111 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.588108 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/738385cd-0b6b-4141-bcd2-8dc2eeee6eba-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.588691 4701 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.610989 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.626141 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 19:20:34 crc kubenswrapper[4701]: E1121 19:20:34.626930 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="738385cd-0b6b-4141-bcd2-8dc2eeee6eba" containerName="glance-log" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.627002 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="738385cd-0b6b-4141-bcd2-8dc2eeee6eba" containerName="glance-log" Nov 21 19:20:34 crc kubenswrapper[4701]: E1121 19:20:34.627106 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="738385cd-0b6b-4141-bcd2-8dc2eeee6eba" containerName="glance-httpd" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.627157 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="738385cd-0b6b-4141-bcd2-8dc2eeee6eba" containerName="glance-httpd" Nov 21 19:20:34 crc kubenswrapper[4701]: E1121 19:20:34.627246 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d84924e-04b9-4541-bf13-c47781cf883f" containerName="init" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.627303 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d84924e-04b9-4541-bf13-c47781cf883f" containerName="init" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.627563 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="738385cd-0b6b-4141-bcd2-8dc2eeee6eba" containerName="glance-httpd" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.627626 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d84924e-04b9-4541-bf13-c47781cf883f" containerName="init" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.627685 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="738385cd-0b6b-4141-bcd2-8dc2eeee6eba" containerName="glance-log" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.628909 
4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.635077 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.635488 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.645327 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.650403 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.650766 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-scheduler-0" podUID="1d332f3a-bf5e-4154-93c9-36265e2b9a3d" containerName="cinder-scheduler" probeResult="failure" output="Get \"http://10.217.0.175:8080/\": dial tcp 10.217.0.175:8080: connect: connection refused" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.691371 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j8flk\" (UniqueName: \"kubernetes.io/projected/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-kube-api-access-j8flk\") pod \"glance-default-internal-api-0\" (UID: \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.691435 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.691462 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.691482 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-scripts\") pod \"glance-default-internal-api-0\" (UID: \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.691579 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-logs\") pod \"glance-default-internal-api-0\" (UID: \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.691631 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-config-data\") pod \"glance-default-internal-api-0\" (UID: \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\") " 
pod="openstack/glance-default-internal-api-0" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.691654 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.691685 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.793566 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-logs\") pod \"glance-default-internal-api-0\" (UID: \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.793675 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-config-data\") pod \"glance-default-internal-api-0\" (UID: \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.793700 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.793776 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.793873 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j8flk\" (UniqueName: \"kubernetes.io/projected/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-kube-api-access-j8flk\") pod \"glance-default-internal-api-0\" (UID: \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.793907 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.793934 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\") " pod="openstack/glance-default-internal-api-0" Nov 21 
19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.793960 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-scripts\") pod \"glance-default-internal-api-0\" (UID: \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.798037 4701 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/glance-default-internal-api-0" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.799436 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.798477 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.798288 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-logs\") pod \"glance-default-internal-api-0\" (UID: \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.804788 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-config-data\") pod \"glance-default-internal-api-0\" (UID: \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.810751 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-scripts\") pod \"glance-default-internal-api-0\" (UID: \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.814918 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.830399 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j8flk\" (UniqueName: \"kubernetes.io/projected/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-kube-api-access-j8flk\") pod \"glance-default-internal-api-0\" (UID: \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.841239 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:20:34 crc kubenswrapper[4701]: I1121 19:20:34.954886 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 21 19:20:35 crc kubenswrapper[4701]: I1121 19:20:35.187813 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 21 19:20:35 crc kubenswrapper[4701]: I1121 19:20:35.303462 4701 generic.go:334] "Generic (PLEG): container finished" podID="d4a4641c-4d04-4fc6-957b-d49ddff014ab" containerID="d59a92b68d2e6bb62ceb2c4452587ec22c0b06e3bc48ff3a4b4000af7d6ca61a" exitCode=0 Nov 21 19:20:35 crc kubenswrapper[4701]: I1121 19:20:35.303516 4701 generic.go:334] "Generic (PLEG): container finished" podID="d4a4641c-4d04-4fc6-957b-d49ddff014ab" containerID="46c216712e46618ea324d7bce516d277095ab73c90d97752255283900fec2829" exitCode=143 Nov 21 19:20:35 crc kubenswrapper[4701]: I1121 19:20:35.303626 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d4a4641c-4d04-4fc6-957b-d49ddff014ab","Type":"ContainerDied","Data":"d59a92b68d2e6bb62ceb2c4452587ec22c0b06e3bc48ff3a4b4000af7d6ca61a"} Nov 21 19:20:35 crc kubenswrapper[4701]: I1121 19:20:35.303664 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d4a4641c-4d04-4fc6-957b-d49ddff014ab","Type":"ContainerDied","Data":"46c216712e46618ea324d7bce516d277095ab73c90d97752255283900fec2829"} Nov 21 19:20:35 crc kubenswrapper[4701]: I1121 19:20:35.673846 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5c5f685fb-t5wpk" podUID="f658fc90-2c53-4cdd-b411-16ccb58f7625" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.172:9311/healthcheck\": read tcp 10.217.0.2:42578->10.217.0.172:9311: read: connection reset by peer" Nov 21 19:20:35 crc kubenswrapper[4701]: I1121 19:20:35.673919 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5c5f685fb-t5wpk" podUID="f658fc90-2c53-4cdd-b411-16ccb58f7625" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.172:9311/healthcheck\": read tcp 10.217.0.2:42568->10.217.0.172:9311: read: connection reset by peer" Nov 21 19:20:35 crc kubenswrapper[4701]: I1121 19:20:35.787566 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 19:20:35 crc kubenswrapper[4701]: I1121 19:20:35.968615 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="738385cd-0b6b-4141-bcd2-8dc2eeee6eba" path="/var/lib/kubelet/pods/738385cd-0b6b-4141-bcd2-8dc2eeee6eba/volumes" Nov 21 19:20:36 crc kubenswrapper[4701]: I1121 19:20:36.335000 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-api-0" Nov 21 19:20:36 crc kubenswrapper[4701]: I1121 19:20:36.343823 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0" Nov 21 19:20:36 crc kubenswrapper[4701]: I1121 19:20:36.348402 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5c5f685fb-t5wpk" 
event={"ID":"f658fc90-2c53-4cdd-b411-16ccb58f7625","Type":"ContainerDied","Data":"d0c41d2af7ff5fbb1537a95f6b77d61a9ea9f0442bd7f7feface68c45bd96eeb"} Nov 21 19:20:36 crc kubenswrapper[4701]: I1121 19:20:36.348596 4701 generic.go:334] "Generic (PLEG): container finished" podID="f658fc90-2c53-4cdd-b411-16ccb58f7625" containerID="d0c41d2af7ff5fbb1537a95f6b77d61a9ea9f0442bd7f7feface68c45bd96eeb" exitCode=0 Nov 21 19:20:36 crc kubenswrapper[4701]: I1121 19:20:36.354361 4701 generic.go:334] "Generic (PLEG): container finished" podID="6084dcd4-0556-4a7d-b880-1979a9c36609" containerID="daeb86b04da5d06629353e1d5951a542bbc5b3a3e4a0797d5922b2a056b643bb" exitCode=0 Nov 21 19:20:36 crc kubenswrapper[4701]: I1121 19:20:36.354430 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8697b77d4d-84dj8" event={"ID":"6084dcd4-0556-4a7d-b880-1979a9c36609","Type":"ContainerDied","Data":"daeb86b04da5d06629353e1d5951a542bbc5b3a3e4a0797d5922b2a056b643bb"} Nov 21 19:20:37 crc kubenswrapper[4701]: I1121 19:20:37.630765 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-869574dbc6-l96tx" Nov 21 19:20:37 crc kubenswrapper[4701]: I1121 19:20:37.907904 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-679559bbc5-wxbzl" Nov 21 19:20:37 crc kubenswrapper[4701]: I1121 19:20:37.987948 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-645bbc848c-zl97c"] Nov 21 19:20:37 crc kubenswrapper[4701]: I1121 19:20:37.988594 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-645bbc848c-zl97c" podUID="b3255fe0-3c69-4ce9-a9e7-823c35dcebbe" containerName="dnsmasq-dns" containerID="cri-o://deac78af57938ba8c2702d53fab56213ab3eeaba278c20bd4e58e157fcca19ca" gracePeriod=10 Nov 21 19:20:38 crc kubenswrapper[4701]: I1121 19:20:38.050096 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-6c68b8ff68-tfcgs" Nov 21 19:20:38 crc kubenswrapper[4701]: I1121 19:20:38.396092 4701 generic.go:334] "Generic (PLEG): container finished" podID="b3255fe0-3c69-4ce9-a9e7-823c35dcebbe" containerID="deac78af57938ba8c2702d53fab56213ab3eeaba278c20bd4e58e157fcca19ca" exitCode=0 Nov 21 19:20:38 crc kubenswrapper[4701]: I1121 19:20:38.396162 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-645bbc848c-zl97c" event={"ID":"b3255fe0-3c69-4ce9-a9e7-823c35dcebbe","Type":"ContainerDied","Data":"deac78af57938ba8c2702d53fab56213ab3eeaba278c20bd4e58e157fcca19ca"} Nov 21 19:20:38 crc kubenswrapper[4701]: I1121 19:20:38.643134 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5c5f685fb-t5wpk" podUID="f658fc90-2c53-4cdd-b411-16ccb58f7625" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.172:9311/healthcheck\": dial tcp 10.217.0.172:9311: connect: connection refused" Nov 21 19:20:38 crc kubenswrapper[4701]: I1121 19:20:38.643379 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5c5f685fb-t5wpk" podUID="f658fc90-2c53-4cdd-b411-16ccb58f7625" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.172:9311/healthcheck\": dial tcp 10.217.0.172:9311: connect: connection refused" Nov 21 19:20:38 crc kubenswrapper[4701]: I1121 19:20:38.951332 4701 scope.go:117] "RemoveContainer" containerID="ee2807df27d43a686f08c63a0b57e104222964e1ac803446173636918d039a8b" Nov 21 19:20:39 crc 
kubenswrapper[4701]: I1121 19:20:39.423410 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-869574dbc6-l96tx" Nov 21 19:20:39 crc kubenswrapper[4701]: I1121 19:20:39.854126 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 21 19:20:39 crc kubenswrapper[4701]: I1121 19:20:39.907946 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 21 19:20:40 crc kubenswrapper[4701]: I1121 19:20:40.291001 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-6c68b8ff68-tfcgs" Nov 21 19:20:40 crc kubenswrapper[4701]: I1121 19:20:40.399493 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-869574dbc6-l96tx"] Nov 21 19:20:40 crc kubenswrapper[4701]: I1121 19:20:40.431721 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="1d332f3a-bf5e-4154-93c9-36265e2b9a3d" containerName="cinder-scheduler" containerID="cri-o://e51410b56f0569c5b450bfe7a37b3dc37d5665b6e6c64734fb75dc8dab0896ef" gracePeriod=30 Nov 21 19:20:40 crc kubenswrapper[4701]: I1121 19:20:40.431925 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-869574dbc6-l96tx" podUID="1c543587-173c-4fb2-b730-72b848f845d6" containerName="horizon-log" containerID="cri-o://430f8778df60ba3d059bc9aa9fa12d81c20d41994db5d5fd007530b6d67dbe5f" gracePeriod=30 Nov 21 19:20:40 crc kubenswrapper[4701]: I1121 19:20:40.432047 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="1d332f3a-bf5e-4154-93c9-36265e2b9a3d" containerName="probe" containerID="cri-o://5f67ebe31c816b2e6c4f0b01d62f9dca52b2c50cdd8ce57afa301e1a23e2e203" gracePeriod=30 Nov 21 19:20:40 crc kubenswrapper[4701]: I1121 19:20:40.432243 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-869574dbc6-l96tx" podUID="1c543587-173c-4fb2-b730-72b848f845d6" containerName="horizon" containerID="cri-o://bec4522f6cfd9c8b102a94c7b53c32fdf41063ea44f1a6d97edd1f49d43218f1" gracePeriod=30 Nov 21 19:20:41 crc kubenswrapper[4701]: I1121 19:20:41.446733 4701 generic.go:334] "Generic (PLEG): container finished" podID="1c543587-173c-4fb2-b730-72b848f845d6" containerID="bec4522f6cfd9c8b102a94c7b53c32fdf41063ea44f1a6d97edd1f49d43218f1" exitCode=0 Nov 21 19:20:41 crc kubenswrapper[4701]: I1121 19:20:41.446805 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-869574dbc6-l96tx" event={"ID":"1c543587-173c-4fb2-b730-72b848f845d6","Type":"ContainerDied","Data":"bec4522f6cfd9c8b102a94c7b53c32fdf41063ea44f1a6d97edd1f49d43218f1"} Nov 21 19:20:42 crc kubenswrapper[4701]: W1121 19:20:42.287588 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd5b0816b_f10a_4e0a_86bc_3f0eda5253a6.slice/crio-cfc39a83c45eb7d4928203c37a9f852116aa6eabd6a1c0f1fb48b098cab6484d WatchSource:0}: Error finding container cfc39a83c45eb7d4928203c37a9f852116aa6eabd6a1c0f1fb48b098cab6484d: Status 404 returned error can't find the container with id cfc39a83c45eb7d4928203c37a9f852116aa6eabd6a1c0f1fb48b098cab6484d Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.438899 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.457794 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8697b77d4d-84dj8" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.539586 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6084dcd4-0556-4a7d-b880-1979a9c36609-httpd-config\") pod \"6084dcd4-0556-4a7d-b880-1979a9c36609\" (UID: \"6084dcd4-0556-4a7d-b880-1979a9c36609\") " Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.539738 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4a4641c-4d04-4fc6-957b-d49ddff014ab-combined-ca-bundle\") pod \"d4a4641c-4d04-4fc6-957b-d49ddff014ab\" (UID: \"d4a4641c-4d04-4fc6-957b-d49ddff014ab\") " Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.539785 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4a4641c-4d04-4fc6-957b-d49ddff014ab-scripts\") pod \"d4a4641c-4d04-4fc6-957b-d49ddff014ab\" (UID: \"d4a4641c-4d04-4fc6-957b-d49ddff014ab\") " Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.539817 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6084dcd4-0556-4a7d-b880-1979a9c36609-ovndb-tls-certs\") pod \"6084dcd4-0556-4a7d-b880-1979a9c36609\" (UID: \"6084dcd4-0556-4a7d-b880-1979a9c36609\") " Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.539868 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4a4641c-4d04-4fc6-957b-d49ddff014ab-config-data\") pod \"d4a4641c-4d04-4fc6-957b-d49ddff014ab\" (UID: \"d4a4641c-4d04-4fc6-957b-d49ddff014ab\") " Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.539924 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d4a4641c-4d04-4fc6-957b-d49ddff014ab-logs\") pod \"d4a4641c-4d04-4fc6-957b-d49ddff014ab\" (UID: \"d4a4641c-4d04-4fc6-957b-d49ddff014ab\") " Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.539965 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"d4a4641c-4d04-4fc6-957b-d49ddff014ab\" (UID: \"d4a4641c-4d04-4fc6-957b-d49ddff014ab\") " Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.540048 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6084dcd4-0556-4a7d-b880-1979a9c36609-combined-ca-bundle\") pod \"6084dcd4-0556-4a7d-b880-1979a9c36609\" (UID: \"6084dcd4-0556-4a7d-b880-1979a9c36609\") " Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.540089 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pp59s\" (UniqueName: \"kubernetes.io/projected/6084dcd4-0556-4a7d-b880-1979a9c36609-kube-api-access-pp59s\") pod \"6084dcd4-0556-4a7d-b880-1979a9c36609\" (UID: \"6084dcd4-0556-4a7d-b880-1979a9c36609\") " Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.540119 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b6mnh\" 
(UniqueName: \"kubernetes.io/projected/d4a4641c-4d04-4fc6-957b-d49ddff014ab-kube-api-access-b6mnh\") pod \"d4a4641c-4d04-4fc6-957b-d49ddff014ab\" (UID: \"d4a4641c-4d04-4fc6-957b-d49ddff014ab\") " Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.540142 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/6084dcd4-0556-4a7d-b880-1979a9c36609-config\") pod \"6084dcd4-0556-4a7d-b880-1979a9c36609\" (UID: \"6084dcd4-0556-4a7d-b880-1979a9c36609\") " Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.540211 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d4a4641c-4d04-4fc6-957b-d49ddff014ab-httpd-run\") pod \"d4a4641c-4d04-4fc6-957b-d49ddff014ab\" (UID: \"d4a4641c-4d04-4fc6-957b-d49ddff014ab\") " Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.546178 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d4a4641c-4d04-4fc6-957b-d49ddff014ab-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "d4a4641c-4d04-4fc6-957b-d49ddff014ab" (UID: "d4a4641c-4d04-4fc6-957b-d49ddff014ab"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.553749 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d4a4641c-4d04-4fc6-957b-d49ddff014ab-logs" (OuterVolumeSpecName: "logs") pod "d4a4641c-4d04-4fc6-957b-d49ddff014ab" (UID: "d4a4641c-4d04-4fc6-957b-d49ddff014ab"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.555121 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6084dcd4-0556-4a7d-b880-1979a9c36609-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "6084dcd4-0556-4a7d-b880-1979a9c36609" (UID: "6084dcd4-0556-4a7d-b880-1979a9c36609"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.567713 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6084dcd4-0556-4a7d-b880-1979a9c36609-kube-api-access-pp59s" (OuterVolumeSpecName: "kube-api-access-pp59s") pod "6084dcd4-0556-4a7d-b880-1979a9c36609" (UID: "6084dcd4-0556-4a7d-b880-1979a9c36609"). InnerVolumeSpecName "kube-api-access-pp59s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.567974 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4a4641c-4d04-4fc6-957b-d49ddff014ab-scripts" (OuterVolumeSpecName: "scripts") pod "d4a4641c-4d04-4fc6-957b-d49ddff014ab" (UID: "d4a4641c-4d04-4fc6-957b-d49ddff014ab"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.582022 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d4a4641c-4d04-4fc6-957b-d49ddff014ab","Type":"ContainerDied","Data":"b159845f6d1f978ff6653ef4d383e54bd33ec888bf91041bd8a3b7a122c999ca"} Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.582134 4701 scope.go:117] "RemoveContainer" containerID="d59a92b68d2e6bb62ceb2c4452587ec22c0b06e3bc48ff3a4b4000af7d6ca61a" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.582421 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.614444 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "glance") pod "d4a4641c-4d04-4fc6-957b-d49ddff014ab" (UID: "d4a4641c-4d04-4fc6-957b-d49ddff014ab"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.630567 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8697b77d4d-84dj8" event={"ID":"6084dcd4-0556-4a7d-b880-1979a9c36609","Type":"ContainerDied","Data":"6baa75c0a1957f76a0c5323943023519bfa2a1222f56a93100f7801082a125fb"} Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.633326 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8697b77d4d-84dj8" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.663395 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4a4641c-4d04-4fc6-957b-d49ddff014ab-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d4a4641c-4d04-4fc6-957b-d49ddff014ab" (UID: "d4a4641c-4d04-4fc6-957b-d49ddff014ab"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.663666 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d4a4641c-4d04-4fc6-957b-d49ddff014ab-kube-api-access-b6mnh" (OuterVolumeSpecName: "kube-api-access-b6mnh") pod "d4a4641c-4d04-4fc6-957b-d49ddff014ab" (UID: "d4a4641c-4d04-4fc6-957b-d49ddff014ab"). InnerVolumeSpecName "kube-api-access-b6mnh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.665654 4701 generic.go:334] "Generic (PLEG): container finished" podID="1d332f3a-bf5e-4154-93c9-36265e2b9a3d" containerID="5f67ebe31c816b2e6c4f0b01d62f9dca52b2c50cdd8ce57afa301e1a23e2e203" exitCode=0 Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.665724 4701 generic.go:334] "Generic (PLEG): container finished" podID="1d332f3a-bf5e-4154-93c9-36265e2b9a3d" containerID="e51410b56f0569c5b450bfe7a37b3dc37d5665b6e6c64734fb75dc8dab0896ef" exitCode=0 Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.665815 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"1d332f3a-bf5e-4154-93c9-36265e2b9a3d","Type":"ContainerDied","Data":"5f67ebe31c816b2e6c4f0b01d62f9dca52b2c50cdd8ce57afa301e1a23e2e203"} Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.665884 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"1d332f3a-bf5e-4154-93c9-36265e2b9a3d","Type":"ContainerDied","Data":"e51410b56f0569c5b450bfe7a37b3dc37d5665b6e6c64734fb75dc8dab0896ef"} Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.668268 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6","Type":"ContainerStarted","Data":"cfc39a83c45eb7d4928203c37a9f852116aa6eabd6a1c0f1fb48b098cab6484d"} Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.674425 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pp59s\" (UniqueName: \"kubernetes.io/projected/6084dcd4-0556-4a7d-b880-1979a9c36609-kube-api-access-pp59s\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.674461 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b6mnh\" (UniqueName: \"kubernetes.io/projected/d4a4641c-4d04-4fc6-957b-d49ddff014ab-kube-api-access-b6mnh\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.674473 4701 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d4a4641c-4d04-4fc6-957b-d49ddff014ab-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.674482 4701 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6084dcd4-0556-4a7d-b880-1979a9c36609-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.674490 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4a4641c-4d04-4fc6-957b-d49ddff014ab-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.674500 4701 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4a4641c-4d04-4fc6-957b-d49ddff014ab-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.674510 4701 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d4a4641c-4d04-4fc6-957b-d49ddff014ab-logs\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.674533 4701 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.698327 4701 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.703453 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4a4641c-4d04-4fc6-957b-d49ddff014ab-config-data" (OuterVolumeSpecName: "config-data") pod "d4a4641c-4d04-4fc6-957b-d49ddff014ab" (UID: "d4a4641c-4d04-4fc6-957b-d49ddff014ab"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.765356 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6084dcd4-0556-4a7d-b880-1979a9c36609-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6084dcd4-0556-4a7d-b880-1979a9c36609" (UID: "6084dcd4-0556-4a7d-b880-1979a9c36609"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.776837 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6084dcd4-0556-4a7d-b880-1979a9c36609-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.776886 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4a4641c-4d04-4fc6-957b-d49ddff014ab-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.776898 4701 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.790327 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6084dcd4-0556-4a7d-b880-1979a9c36609-config" (OuterVolumeSpecName: "config") pod "6084dcd4-0556-4a7d-b880-1979a9c36609" (UID: "6084dcd4-0556-4a7d-b880-1979a9c36609"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.821437 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6084dcd4-0556-4a7d-b880-1979a9c36609-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "6084dcd4-0556-4a7d-b880-1979a9c36609" (UID: "6084dcd4-0556-4a7d-b880-1979a9c36609"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.878628 4701 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/6084dcd4-0556-4a7d-b880-1979a9c36609-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.878660 4701 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6084dcd4-0556-4a7d-b880-1979a9c36609-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.921692 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.929439 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.954476 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 19:20:42 crc kubenswrapper[4701]: E1121 19:20:42.955018 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4a4641c-4d04-4fc6-957b-d49ddff014ab" containerName="glance-httpd" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.955039 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4a4641c-4d04-4fc6-957b-d49ddff014ab" containerName="glance-httpd" Nov 21 19:20:42 crc kubenswrapper[4701]: E1121 19:20:42.955066 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4a4641c-4d04-4fc6-957b-d49ddff014ab" containerName="glance-log" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.955074 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4a4641c-4d04-4fc6-957b-d49ddff014ab" containerName="glance-log" Nov 21 19:20:42 crc kubenswrapper[4701]: E1121 19:20:42.955086 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6084dcd4-0556-4a7d-b880-1979a9c36609" containerName="neutron-api" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.955094 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="6084dcd4-0556-4a7d-b880-1979a9c36609" containerName="neutron-api" Nov 21 19:20:42 crc kubenswrapper[4701]: E1121 19:20:42.955100 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6084dcd4-0556-4a7d-b880-1979a9c36609" containerName="neutron-httpd" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.955108 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="6084dcd4-0556-4a7d-b880-1979a9c36609" containerName="neutron-httpd" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.955360 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="6084dcd4-0556-4a7d-b880-1979a9c36609" containerName="neutron-api" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.955385 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="6084dcd4-0556-4a7d-b880-1979a9c36609" containerName="neutron-httpd" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.955401 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4a4641c-4d04-4fc6-957b-d49ddff014ab" containerName="glance-httpd" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.955415 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4a4641c-4d04-4fc6-957b-d49ddff014ab" containerName="glance-log" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.960554 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.963998 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.964298 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 21 19:20:42 crc kubenswrapper[4701]: I1121 19:20:42.977926 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 19:20:43 crc kubenswrapper[4701]: I1121 19:20:43.041611 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-8697b77d4d-84dj8"] Nov 21 19:20:43 crc kubenswrapper[4701]: I1121 19:20:43.060838 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-8697b77d4d-84dj8"] Nov 21 19:20:43 crc kubenswrapper[4701]: I1121 19:20:43.095519 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cchn7\" (UniqueName: \"kubernetes.io/projected/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-kube-api-access-cchn7\") pod \"glance-default-external-api-0\" (UID: \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:43 crc kubenswrapper[4701]: I1121 19:20:43.095565 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:43 crc kubenswrapper[4701]: I1121 19:20:43.095622 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-scripts\") pod \"glance-default-external-api-0\" (UID: \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:43 crc kubenswrapper[4701]: I1121 19:20:43.095648 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:43 crc kubenswrapper[4701]: I1121 19:20:43.095668 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:43 crc kubenswrapper[4701]: I1121 19:20:43.095684 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-config-data\") pod \"glance-default-external-api-0\" (UID: \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:43 crc kubenswrapper[4701]: I1121 19:20:43.095715 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:43 crc kubenswrapper[4701]: I1121 19:20:43.095758 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-logs\") pod \"glance-default-external-api-0\" (UID: \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:43 crc kubenswrapper[4701]: I1121 19:20:43.197273 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cchn7\" (UniqueName: \"kubernetes.io/projected/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-kube-api-access-cchn7\") pod \"glance-default-external-api-0\" (UID: \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:43 crc kubenswrapper[4701]: I1121 19:20:43.197326 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:43 crc kubenswrapper[4701]: I1121 19:20:43.197366 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:43 crc kubenswrapper[4701]: I1121 19:20:43.197383 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-scripts\") pod \"glance-default-external-api-0\" (UID: \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:43 crc kubenswrapper[4701]: I1121 19:20:43.197402 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:43 crc kubenswrapper[4701]: I1121 19:20:43.197418 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-config-data\") pod \"glance-default-external-api-0\" (UID: \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:43 crc kubenswrapper[4701]: I1121 19:20:43.197445 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:43 crc kubenswrapper[4701]: I1121 19:20:43.197480 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-logs\") pod 
\"glance-default-external-api-0\" (UID: \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:43 crc kubenswrapper[4701]: I1121 19:20:43.198064 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-logs\") pod \"glance-default-external-api-0\" (UID: \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:43 crc kubenswrapper[4701]: I1121 19:20:43.199069 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:43 crc kubenswrapper[4701]: I1121 19:20:43.199477 4701 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/glance-default-external-api-0" Nov 21 19:20:43 crc kubenswrapper[4701]: I1121 19:20:43.205435 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:43 crc kubenswrapper[4701]: I1121 19:20:43.206440 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-scripts\") pod \"glance-default-external-api-0\" (UID: \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:43 crc kubenswrapper[4701]: I1121 19:20:43.207682 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-config-data\") pod \"glance-default-external-api-0\" (UID: \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:43 crc kubenswrapper[4701]: I1121 19:20:43.211221 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:43 crc kubenswrapper[4701]: I1121 19:20:43.231859 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cchn7\" (UniqueName: \"kubernetes.io/projected/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-kube-api-access-cchn7\") pod \"glance-default-external-api-0\" (UID: \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\") " pod="openstack/glance-default-external-api-0" Nov 21 19:20:43 crc kubenswrapper[4701]: I1121 19:20:43.241449 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\") " pod="openstack/glance-default-external-api-0" Nov 21 
19:20:43 crc kubenswrapper[4701]: I1121 19:20:43.315996 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 21 19:20:43 crc kubenswrapper[4701]: I1121 19:20:43.844408 4701 scope.go:117] "RemoveContainer" containerID="46c216712e46618ea324d7bce516d277095ab73c90d97752255283900fec2829" Nov 21 19:20:43 crc kubenswrapper[4701]: I1121 19:20:43.969445 4701 scope.go:117] "RemoveContainer" containerID="72f3c9d31797acd8881ba733353698dc04c69993e3a991fd1273ac0a40cae63e" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.009280 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6084dcd4-0556-4a7d-b880-1979a9c36609" path="/var/lib/kubelet/pods/6084dcd4-0556-4a7d-b880-1979a9c36609/volumes" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.010241 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d4a4641c-4d04-4fc6-957b-d49ddff014ab" path="/var/lib/kubelet/pods/d4a4641c-4d04-4fc6-957b-d49ddff014ab/volumes" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.015503 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5c5f685fb-t5wpk" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.029057 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.072121 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-645bbc848c-zl97c" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.120618 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f658fc90-2c53-4cdd-b411-16ccb58f7625-config-data-custom\") pod \"f658fc90-2c53-4cdd-b411-16ccb58f7625\" (UID: \"f658fc90-2c53-4cdd-b411-16ccb58f7625\") " Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.120661 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f658fc90-2c53-4cdd-b411-16ccb58f7625-combined-ca-bundle\") pod \"f658fc90-2c53-4cdd-b411-16ccb58f7625\" (UID: \"f658fc90-2c53-4cdd-b411-16ccb58f7625\") " Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.120739 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1d332f3a-bf5e-4154-93c9-36265e2b9a3d-scripts\") pod \"1d332f3a-bf5e-4154-93c9-36265e2b9a3d\" (UID: \"1d332f3a-bf5e-4154-93c9-36265e2b9a3d\") " Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.120868 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d332f3a-bf5e-4154-93c9-36265e2b9a3d-combined-ca-bundle\") pod \"1d332f3a-bf5e-4154-93c9-36265e2b9a3d\" (UID: \"1d332f3a-bf5e-4154-93c9-36265e2b9a3d\") " Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.120935 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1d332f3a-bf5e-4154-93c9-36265e2b9a3d-config-data-custom\") pod \"1d332f3a-bf5e-4154-93c9-36265e2b9a3d\" (UID: \"1d332f3a-bf5e-4154-93c9-36265e2b9a3d\") " Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.120991 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/f658fc90-2c53-4cdd-b411-16ccb58f7625-config-data\") pod \"f658fc90-2c53-4cdd-b411-16ccb58f7625\" (UID: \"f658fc90-2c53-4cdd-b411-16ccb58f7625\") " Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.121019 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d332f3a-bf5e-4154-93c9-36265e2b9a3d-config-data\") pod \"1d332f3a-bf5e-4154-93c9-36265e2b9a3d\" (UID: \"1d332f3a-bf5e-4154-93c9-36265e2b9a3d\") " Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.121085 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1d332f3a-bf5e-4154-93c9-36265e2b9a3d-etc-machine-id\") pod \"1d332f3a-bf5e-4154-93c9-36265e2b9a3d\" (UID: \"1d332f3a-bf5e-4154-93c9-36265e2b9a3d\") " Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.121131 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nqc5m\" (UniqueName: \"kubernetes.io/projected/1d332f3a-bf5e-4154-93c9-36265e2b9a3d-kube-api-access-nqc5m\") pod \"1d332f3a-bf5e-4154-93c9-36265e2b9a3d\" (UID: \"1d332f3a-bf5e-4154-93c9-36265e2b9a3d\") " Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.121188 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f658fc90-2c53-4cdd-b411-16ccb58f7625-logs\") pod \"f658fc90-2c53-4cdd-b411-16ccb58f7625\" (UID: \"f658fc90-2c53-4cdd-b411-16ccb58f7625\") " Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.121338 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-56djv\" (UniqueName: \"kubernetes.io/projected/f658fc90-2c53-4cdd-b411-16ccb58f7625-kube-api-access-56djv\") pod \"f658fc90-2c53-4cdd-b411-16ccb58f7625\" (UID: \"f658fc90-2c53-4cdd-b411-16ccb58f7625\") " Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.128332 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1d332f3a-bf5e-4154-93c9-36265e2b9a3d-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "1d332f3a-bf5e-4154-93c9-36265e2b9a3d" (UID: "1d332f3a-bf5e-4154-93c9-36265e2b9a3d"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.142103 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f658fc90-2c53-4cdd-b411-16ccb58f7625-logs" (OuterVolumeSpecName: "logs") pod "f658fc90-2c53-4cdd-b411-16ccb58f7625" (UID: "f658fc90-2c53-4cdd-b411-16ccb58f7625"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.161664 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-78b94b5b48-685pj" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.198362 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f658fc90-2c53-4cdd-b411-16ccb58f7625-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "f658fc90-2c53-4cdd-b411-16ccb58f7625" (UID: "f658fc90-2c53-4cdd-b411-16ccb58f7625"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.198498 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d332f3a-bf5e-4154-93c9-36265e2b9a3d-kube-api-access-nqc5m" (OuterVolumeSpecName: "kube-api-access-nqc5m") pod "1d332f3a-bf5e-4154-93c9-36265e2b9a3d" (UID: "1d332f3a-bf5e-4154-93c9-36265e2b9a3d"). InnerVolumeSpecName "kube-api-access-nqc5m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.200519 4701 scope.go:117] "RemoveContainer" containerID="daeb86b04da5d06629353e1d5951a542bbc5b3a3e4a0797d5922b2a056b643bb" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.201430 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d332f3a-bf5e-4154-93c9-36265e2b9a3d-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "1d332f3a-bf5e-4154-93c9-36265e2b9a3d" (UID: "1d332f3a-bf5e-4154-93c9-36265e2b9a3d"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.201524 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-78b94b5b48-685pj" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.212981 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f658fc90-2c53-4cdd-b411-16ccb58f7625-kube-api-access-56djv" (OuterVolumeSpecName: "kube-api-access-56djv") pod "f658fc90-2c53-4cdd-b411-16ccb58f7625" (UID: "f658fc90-2c53-4cdd-b411-16ccb58f7625"). InnerVolumeSpecName "kube-api-access-56djv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.223161 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b3255fe0-3c69-4ce9-a9e7-823c35dcebbe-dns-svc\") pod \"b3255fe0-3c69-4ce9-a9e7-823c35dcebbe\" (UID: \"b3255fe0-3c69-4ce9-a9e7-823c35dcebbe\") " Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.223355 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b3255fe0-3c69-4ce9-a9e7-823c35dcebbe-config\") pod \"b3255fe0-3c69-4ce9-a9e7-823c35dcebbe\" (UID: \"b3255fe0-3c69-4ce9-a9e7-823c35dcebbe\") " Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.223446 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nbj2t\" (UniqueName: \"kubernetes.io/projected/b3255fe0-3c69-4ce9-a9e7-823c35dcebbe-kube-api-access-nbj2t\") pod \"b3255fe0-3c69-4ce9-a9e7-823c35dcebbe\" (UID: \"b3255fe0-3c69-4ce9-a9e7-823c35dcebbe\") " Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.223582 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b3255fe0-3c69-4ce9-a9e7-823c35dcebbe-dns-swift-storage-0\") pod \"b3255fe0-3c69-4ce9-a9e7-823c35dcebbe\" (UID: \"b3255fe0-3c69-4ce9-a9e7-823c35dcebbe\") " Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.223644 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b3255fe0-3c69-4ce9-a9e7-823c35dcebbe-ovsdbserver-sb\") pod \"b3255fe0-3c69-4ce9-a9e7-823c35dcebbe\" (UID: \"b3255fe0-3c69-4ce9-a9e7-823c35dcebbe\") " Nov 
21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.223718 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b3255fe0-3c69-4ce9-a9e7-823c35dcebbe-ovsdbserver-nb\") pod \"b3255fe0-3c69-4ce9-a9e7-823c35dcebbe\" (UID: \"b3255fe0-3c69-4ce9-a9e7-823c35dcebbe\") " Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.226327 4701 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1d332f3a-bf5e-4154-93c9-36265e2b9a3d-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.226343 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nqc5m\" (UniqueName: \"kubernetes.io/projected/1d332f3a-bf5e-4154-93c9-36265e2b9a3d-kube-api-access-nqc5m\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.226357 4701 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f658fc90-2c53-4cdd-b411-16ccb58f7625-logs\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.226367 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-56djv\" (UniqueName: \"kubernetes.io/projected/f658fc90-2c53-4cdd-b411-16ccb58f7625-kube-api-access-56djv\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.226379 4701 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f658fc90-2c53-4cdd-b411-16ccb58f7625-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.226389 4701 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1d332f3a-bf5e-4154-93c9-36265e2b9a3d-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.245661 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d332f3a-bf5e-4154-93c9-36265e2b9a3d-scripts" (OuterVolumeSpecName: "scripts") pod "1d332f3a-bf5e-4154-93c9-36265e2b9a3d" (UID: "1d332f3a-bf5e-4154-93c9-36265e2b9a3d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.252555 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.286586 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3255fe0-3c69-4ce9-a9e7-823c35dcebbe-kube-api-access-nbj2t" (OuterVolumeSpecName: "kube-api-access-nbj2t") pod "b3255fe0-3c69-4ce9-a9e7-823c35dcebbe" (UID: "b3255fe0-3c69-4ce9-a9e7-823c35dcebbe"). InnerVolumeSpecName "kube-api-access-nbj2t". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.333223 4701 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1d332f3a-bf5e-4154-93c9-36265e2b9a3d-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.333259 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nbj2t\" (UniqueName: \"kubernetes.io/projected/b3255fe0-3c69-4ce9-a9e7-823c35dcebbe-kube-api-access-nbj2t\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.356793 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f658fc90-2c53-4cdd-b411-16ccb58f7625-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f658fc90-2c53-4cdd-b411-16ccb58f7625" (UID: "f658fc90-2c53-4cdd-b411-16ccb58f7625"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.443440 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f658fc90-2c53-4cdd-b411-16ccb58f7625-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.444339 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d332f3a-bf5e-4154-93c9-36265e2b9a3d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1d332f3a-bf5e-4154-93c9-36265e2b9a3d" (UID: "1d332f3a-bf5e-4154-93c9-36265e2b9a3d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.501441 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f658fc90-2c53-4cdd-b411-16ccb58f7625-config-data" (OuterVolumeSpecName: "config-data") pod "f658fc90-2c53-4cdd-b411-16ccb58f7625" (UID: "f658fc90-2c53-4cdd-b411-16ccb58f7625"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.554112 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f658fc90-2c53-4cdd-b411-16ccb58f7625-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.554148 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d332f3a-bf5e-4154-93c9-36265e2b9a3d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.584087 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b3255fe0-3c69-4ce9-a9e7-823c35dcebbe-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b3255fe0-3c69-4ce9-a9e7-823c35dcebbe" (UID: "b3255fe0-3c69-4ce9-a9e7-823c35dcebbe"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.618142 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b3255fe0-3c69-4ce9-a9e7-823c35dcebbe-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "b3255fe0-3c69-4ce9-a9e7-823c35dcebbe" (UID: "b3255fe0-3c69-4ce9-a9e7-823c35dcebbe"). 
InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.628801 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-65d76b5c54-c9d89" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.662246 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b3255fe0-3c69-4ce9-a9e7-823c35dcebbe-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b3255fe0-3c69-4ce9-a9e7-823c35dcebbe" (UID: "b3255fe0-3c69-4ce9-a9e7-823c35dcebbe"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.673712 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b3255fe0-3c69-4ce9-a9e7-823c35dcebbe-config" (OuterVolumeSpecName: "config") pod "b3255fe0-3c69-4ce9-a9e7-823c35dcebbe" (UID: "b3255fe0-3c69-4ce9-a9e7-823c35dcebbe"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.682161 4701 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b3255fe0-3c69-4ce9-a9e7-823c35dcebbe-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.682190 4701 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b3255fe0-3c69-4ce9-a9e7-823c35dcebbe-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.682220 4701 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b3255fe0-3c69-4ce9-a9e7-823c35dcebbe-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.682229 4701 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b3255fe0-3c69-4ce9-a9e7-823c35dcebbe-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.700830 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b3255fe0-3c69-4ce9-a9e7-823c35dcebbe-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b3255fe0-3c69-4ce9-a9e7-823c35dcebbe" (UID: "b3255fe0-3c69-4ce9-a9e7-823c35dcebbe"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.717394 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-645bbc848c-zl97c" event={"ID":"b3255fe0-3c69-4ce9-a9e7-823c35dcebbe","Type":"ContainerDied","Data":"fb101576134e4e8d9df6cb6d744355fb6d78d3819313182c4fe1629502b7473a"} Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.717473 4701 scope.go:117] "RemoveContainer" containerID="deac78af57938ba8c2702d53fab56213ab3eeaba278c20bd4e58e157fcca19ca" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.717628 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-645bbc848c-zl97c" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.717621 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d332f3a-bf5e-4154-93c9-36265e2b9a3d-config-data" (OuterVolumeSpecName: "config-data") pod "1d332f3a-bf5e-4154-93c9-36265e2b9a3d" (UID: "1d332f3a-bf5e-4154-93c9-36265e2b9a3d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.736673 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5c5f685fb-t5wpk" event={"ID":"f658fc90-2c53-4cdd-b411-16ccb58f7625","Type":"ContainerDied","Data":"7025fb1aa63d4f90a38c93f92d07cd72c8fee51fdaede4c67a01d64014f3962b"} Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.739720 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5c5f685fb-t5wpk" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.782719 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.782960 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"1d332f3a-bf5e-4154-93c9-36265e2b9a3d","Type":"ContainerDied","Data":"79f019768ec73633a9069b464ee024f48a384653746a4c9de5d15fe1f91649d6"} Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.785835 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d332f3a-bf5e-4154-93c9-36265e2b9a3d-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.785869 4701 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b3255fe0-3c69-4ce9-a9e7-823c35dcebbe-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.795650 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"8a7a5be4-96a4-4574-9839-2d0576595305","Type":"ContainerStarted","Data":"a0ec6d1a2fc828c1e5eac769e653fa0e9805be850a5d554ea13660925e01ccf6"} Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.807138 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.840526 4701 scope.go:117] "RemoveContainer" containerID="334e96c104352661af00d662bbd12d44d1abd6d9768b71563c10d380ffc573a1" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.857394 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-645bbc848c-zl97c"] Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.883773 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-645bbc848c-zl97c"] Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.935919 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.948555 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.956732 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-5c5f685fb-t5wpk"] Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.968423 4701 kubelet.go:2431] 
"SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-5c5f685fb-t5wpk"] Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.976274 4701 scope.go:117] "RemoveContainer" containerID="d0c41d2af7ff5fbb1537a95f6b77d61a9ea9f0442bd7f7feface68c45bd96eeb" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.976403 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 21 19:20:44 crc kubenswrapper[4701]: E1121 19:20:44.976807 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3255fe0-3c69-4ce9-a9e7-823c35dcebbe" containerName="dnsmasq-dns" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.976819 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3255fe0-3c69-4ce9-a9e7-823c35dcebbe" containerName="dnsmasq-dns" Nov 21 19:20:44 crc kubenswrapper[4701]: E1121 19:20:44.976832 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f658fc90-2c53-4cdd-b411-16ccb58f7625" containerName="barbican-api-log" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.976838 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="f658fc90-2c53-4cdd-b411-16ccb58f7625" containerName="barbican-api-log" Nov 21 19:20:44 crc kubenswrapper[4701]: E1121 19:20:44.976865 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3255fe0-3c69-4ce9-a9e7-823c35dcebbe" containerName="init" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.976871 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3255fe0-3c69-4ce9-a9e7-823c35dcebbe" containerName="init" Nov 21 19:20:44 crc kubenswrapper[4701]: E1121 19:20:44.976882 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f658fc90-2c53-4cdd-b411-16ccb58f7625" containerName="barbican-api" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.976888 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="f658fc90-2c53-4cdd-b411-16ccb58f7625" containerName="barbican-api" Nov 21 19:20:44 crc kubenswrapper[4701]: E1121 19:20:44.976906 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d332f3a-bf5e-4154-93c9-36265e2b9a3d" containerName="cinder-scheduler" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.976912 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d332f3a-bf5e-4154-93c9-36265e2b9a3d" containerName="cinder-scheduler" Nov 21 19:20:44 crc kubenswrapper[4701]: E1121 19:20:44.976939 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d332f3a-bf5e-4154-93c9-36265e2b9a3d" containerName="probe" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.976946 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d332f3a-bf5e-4154-93c9-36265e2b9a3d" containerName="probe" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.977233 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="1d332f3a-bf5e-4154-93c9-36265e2b9a3d" containerName="probe" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.977248 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="1d332f3a-bf5e-4154-93c9-36265e2b9a3d" containerName="cinder-scheduler" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.977265 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="f658fc90-2c53-4cdd-b411-16ccb58f7625" containerName="barbican-api" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.977274 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="f658fc90-2c53-4cdd-b411-16ccb58f7625" containerName="barbican-api-log" Nov 21 
19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.977291 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3255fe0-3c69-4ce9-a9e7-823c35dcebbe" containerName="dnsmasq-dns" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.978519 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.985637 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 21 19:20:44 crc kubenswrapper[4701]: I1121 19:20:44.988643 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 21 19:20:45 crc kubenswrapper[4701]: I1121 19:20:45.022638 4701 scope.go:117] "RemoveContainer" containerID="8654f42a09f9d1570c25cfce1f5f3c459a60f7a17f9a2a9763183d04aaad1230" Nov 21 19:20:45 crc kubenswrapper[4701]: I1121 19:20:45.060921 4701 scope.go:117] "RemoveContainer" containerID="5f67ebe31c816b2e6c4f0b01d62f9dca52b2c50cdd8ce57afa301e1a23e2e203" Nov 21 19:20:45 crc kubenswrapper[4701]: I1121 19:20:45.101402 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b72befc-936a-4833-8e7a-f765c655a300-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"4b72befc-936a-4833-8e7a-f765c655a300\") " pod="openstack/cinder-scheduler-0" Nov 21 19:20:45 crc kubenswrapper[4701]: I1121 19:20:45.101520 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4b72befc-936a-4833-8e7a-f765c655a300-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"4b72befc-936a-4833-8e7a-f765c655a300\") " pod="openstack/cinder-scheduler-0" Nov 21 19:20:45 crc kubenswrapper[4701]: I1121 19:20:45.101548 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h5pvs\" (UniqueName: \"kubernetes.io/projected/4b72befc-936a-4833-8e7a-f765c655a300-kube-api-access-h5pvs\") pod \"cinder-scheduler-0\" (UID: \"4b72befc-936a-4833-8e7a-f765c655a300\") " pod="openstack/cinder-scheduler-0" Nov 21 19:20:45 crc kubenswrapper[4701]: I1121 19:20:45.101622 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4b72befc-936a-4833-8e7a-f765c655a300-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"4b72befc-936a-4833-8e7a-f765c655a300\") " pod="openstack/cinder-scheduler-0" Nov 21 19:20:45 crc kubenswrapper[4701]: I1121 19:20:45.101658 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4b72befc-936a-4833-8e7a-f765c655a300-scripts\") pod \"cinder-scheduler-0\" (UID: \"4b72befc-936a-4833-8e7a-f765c655a300\") " pod="openstack/cinder-scheduler-0" Nov 21 19:20:45 crc kubenswrapper[4701]: I1121 19:20:45.101688 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b72befc-936a-4833-8e7a-f765c655a300-config-data\") pod \"cinder-scheduler-0\" (UID: \"4b72befc-936a-4833-8e7a-f765c655a300\") " pod="openstack/cinder-scheduler-0" Nov 21 19:20:45 crc kubenswrapper[4701]: I1121 19:20:45.117970 4701 scope.go:117] "RemoveContainer" 
containerID="e51410b56f0569c5b450bfe7a37b3dc37d5665b6e6c64734fb75dc8dab0896ef" Nov 21 19:20:45 crc kubenswrapper[4701]: I1121 19:20:45.203106 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b72befc-936a-4833-8e7a-f765c655a300-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"4b72befc-936a-4833-8e7a-f765c655a300\") " pod="openstack/cinder-scheduler-0" Nov 21 19:20:45 crc kubenswrapper[4701]: I1121 19:20:45.203212 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4b72befc-936a-4833-8e7a-f765c655a300-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"4b72befc-936a-4833-8e7a-f765c655a300\") " pod="openstack/cinder-scheduler-0" Nov 21 19:20:45 crc kubenswrapper[4701]: I1121 19:20:45.203239 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h5pvs\" (UniqueName: \"kubernetes.io/projected/4b72befc-936a-4833-8e7a-f765c655a300-kube-api-access-h5pvs\") pod \"cinder-scheduler-0\" (UID: \"4b72befc-936a-4833-8e7a-f765c655a300\") " pod="openstack/cinder-scheduler-0" Nov 21 19:20:45 crc kubenswrapper[4701]: I1121 19:20:45.203289 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4b72befc-936a-4833-8e7a-f765c655a300-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"4b72befc-936a-4833-8e7a-f765c655a300\") " pod="openstack/cinder-scheduler-0" Nov 21 19:20:45 crc kubenswrapper[4701]: I1121 19:20:45.203316 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4b72befc-936a-4833-8e7a-f765c655a300-scripts\") pod \"cinder-scheduler-0\" (UID: \"4b72befc-936a-4833-8e7a-f765c655a300\") " pod="openstack/cinder-scheduler-0" Nov 21 19:20:45 crc kubenswrapper[4701]: I1121 19:20:45.203334 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b72befc-936a-4833-8e7a-f765c655a300-config-data\") pod \"cinder-scheduler-0\" (UID: \"4b72befc-936a-4833-8e7a-f765c655a300\") " pod="openstack/cinder-scheduler-0" Nov 21 19:20:45 crc kubenswrapper[4701]: I1121 19:20:45.205996 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4b72befc-936a-4833-8e7a-f765c655a300-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"4b72befc-936a-4833-8e7a-f765c655a300\") " pod="openstack/cinder-scheduler-0" Nov 21 19:20:45 crc kubenswrapper[4701]: I1121 19:20:45.208039 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4b72befc-936a-4833-8e7a-f765c655a300-scripts\") pod \"cinder-scheduler-0\" (UID: \"4b72befc-936a-4833-8e7a-f765c655a300\") " pod="openstack/cinder-scheduler-0" Nov 21 19:20:45 crc kubenswrapper[4701]: I1121 19:20:45.210906 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4b72befc-936a-4833-8e7a-f765c655a300-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"4b72befc-936a-4833-8e7a-f765c655a300\") " pod="openstack/cinder-scheduler-0" Nov 21 19:20:45 crc kubenswrapper[4701]: I1121 19:20:45.211582 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/4b72befc-936a-4833-8e7a-f765c655a300-config-data\") pod \"cinder-scheduler-0\" (UID: \"4b72befc-936a-4833-8e7a-f765c655a300\") " pod="openstack/cinder-scheduler-0" Nov 21 19:20:45 crc kubenswrapper[4701]: I1121 19:20:45.216966 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b72befc-936a-4833-8e7a-f765c655a300-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"4b72befc-936a-4833-8e7a-f765c655a300\") " pod="openstack/cinder-scheduler-0" Nov 21 19:20:45 crc kubenswrapper[4701]: I1121 19:20:45.219539 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h5pvs\" (UniqueName: \"kubernetes.io/projected/4b72befc-936a-4833-8e7a-f765c655a300-kube-api-access-h5pvs\") pod \"cinder-scheduler-0\" (UID: \"4b72befc-936a-4833-8e7a-f765c655a300\") " pod="openstack/cinder-scheduler-0" Nov 21 19:20:45 crc kubenswrapper[4701]: I1121 19:20:45.316564 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 21 19:20:45 crc kubenswrapper[4701]: I1121 19:20:45.664891 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-869574dbc6-l96tx" podUID="1c543587-173c-4fb2-b730-72b848f845d6" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.158:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.158:8443: connect: connection refused" Nov 21 19:20:45 crc kubenswrapper[4701]: I1121 19:20:45.821718 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffb1df83-0092-42e4-885f-e934786a503b","Type":"ContainerStarted","Data":"cc74f4c171621f6e28d86021d565279b267b339adb7957e0da0213b2df8b7d27"} Nov 21 19:20:45 crc kubenswrapper[4701]: I1121 19:20:45.822028 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ffb1df83-0092-42e4-885f-e934786a503b" containerName="ceilometer-central-agent" containerID="cri-o://b4dc83dd71334cd762e2fee8ecd2a89fcc95bee70b97f3f0ec5203458453d5be" gracePeriod=30 Nov 21 19:20:45 crc kubenswrapper[4701]: I1121 19:20:45.822458 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 21 19:20:45 crc kubenswrapper[4701]: I1121 19:20:45.822884 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ffb1df83-0092-42e4-885f-e934786a503b" containerName="proxy-httpd" containerID="cri-o://cc74f4c171621f6e28d86021d565279b267b339adb7957e0da0213b2df8b7d27" gracePeriod=30 Nov 21 19:20:45 crc kubenswrapper[4701]: I1121 19:20:45.822963 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ffb1df83-0092-42e4-885f-e934786a503b" containerName="sg-core" containerID="cri-o://bd9c874fdbe44d1bdec553997416a9c3856a7f215901e4170d1f23cddff5b52e" gracePeriod=30 Nov 21 19:20:45 crc kubenswrapper[4701]: I1121 19:20:45.823013 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ffb1df83-0092-42e4-885f-e934786a503b" containerName="ceilometer-notification-agent" containerID="cri-o://a57b2c46539d0f42fe1f128aed5b54f7ab4329c8a9a79057cdda67e3ad6eca93" gracePeriod=30 Nov 21 19:20:45 crc kubenswrapper[4701]: I1121 19:20:45.834998 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" 
event={"ID":"07d6bc32-28a2-40bf-bd2e-9454dfaac91f","Type":"ContainerStarted","Data":"027dbad315dc1cea733237b8d6c9bac7e53b1dccb09b2f3be95ad24b224a276d"} Nov 21 19:20:45 crc kubenswrapper[4701]: I1121 19:20:45.864310 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6","Type":"ContainerStarted","Data":"d51ba6844549ff4e635dc6a826803800a634850d1c97119683ae52f8c6de2946"} Nov 21 19:20:45 crc kubenswrapper[4701]: I1121 19:20:45.869318 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 21 19:20:45 crc kubenswrapper[4701]: I1121 19:20:45.877279 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.97221312 podStartE2EDuration="1m29.877251799s" podCreationTimestamp="2025-11-21 19:19:16 +0000 UTC" firstStartedPulling="2025-11-21 19:19:17.956051847 +0000 UTC m=+1048.741191874" lastFinishedPulling="2025-11-21 19:20:43.861090526 +0000 UTC m=+1134.646230553" observedRunningTime="2025-11-21 19:20:45.846675759 +0000 UTC m=+1136.631815786" watchObservedRunningTime="2025-11-21 19:20:45.877251799 +0000 UTC m=+1136.662391826" Nov 21 19:20:45 crc kubenswrapper[4701]: I1121 19:20:45.968343 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d332f3a-bf5e-4154-93c9-36265e2b9a3d" path="/var/lib/kubelet/pods/1d332f3a-bf5e-4154-93c9-36265e2b9a3d/volumes" Nov 21 19:20:45 crc kubenswrapper[4701]: I1121 19:20:45.969246 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b3255fe0-3c69-4ce9-a9e7-823c35dcebbe" path="/var/lib/kubelet/pods/b3255fe0-3c69-4ce9-a9e7-823c35dcebbe/volumes" Nov 21 19:20:45 crc kubenswrapper[4701]: I1121 19:20:45.969952 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f658fc90-2c53-4cdd-b411-16ccb58f7625" path="/var/lib/kubelet/pods/f658fc90-2c53-4cdd-b411-16ccb58f7625/volumes" Nov 21 19:20:46 crc kubenswrapper[4701]: I1121 19:20:46.482373 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Nov 21 19:20:46 crc kubenswrapper[4701]: I1121 19:20:46.546982 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-decision-engine-0" Nov 21 19:20:46 crc kubenswrapper[4701]: I1121 19:20:46.824076 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 21 19:20:46 crc kubenswrapper[4701]: I1121 19:20:46.825631 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 21 19:20:46 crc kubenswrapper[4701]: I1121 19:20:46.833980 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 21 19:20:46 crc kubenswrapper[4701]: I1121 19:20:46.841039 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 21 19:20:46 crc kubenswrapper[4701]: I1121 19:20:46.841735 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 21 19:20:46 crc kubenswrapper[4701]: I1121 19:20:46.845562 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-kjvsb" Nov 21 19:20:46 crc kubenswrapper[4701]: I1121 19:20:46.900549 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6","Type":"ContainerStarted","Data":"e5dd62460d381b928eb7d0e39d1a32367e929fc7e8a19f0e97ec53f0e28f9a3e"} Nov 21 19:20:46 crc kubenswrapper[4701]: I1121 19:20:46.903328 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"4b72befc-936a-4833-8e7a-f765c655a300","Type":"ContainerStarted","Data":"9a4b2177cd1f69718eeecf5c93680b4cc9ed689a527315357e4786767c842823"} Nov 21 19:20:46 crc kubenswrapper[4701]: I1121 19:20:46.913722 4701 generic.go:334] "Generic (PLEG): container finished" podID="ffb1df83-0092-42e4-885f-e934786a503b" containerID="cc74f4c171621f6e28d86021d565279b267b339adb7957e0da0213b2df8b7d27" exitCode=0 Nov 21 19:20:46 crc kubenswrapper[4701]: I1121 19:20:46.913751 4701 generic.go:334] "Generic (PLEG): container finished" podID="ffb1df83-0092-42e4-885f-e934786a503b" containerID="bd9c874fdbe44d1bdec553997416a9c3856a7f215901e4170d1f23cddff5b52e" exitCode=2 Nov 21 19:20:46 crc kubenswrapper[4701]: I1121 19:20:46.913780 4701 generic.go:334] "Generic (PLEG): container finished" podID="ffb1df83-0092-42e4-885f-e934786a503b" containerID="b4dc83dd71334cd762e2fee8ecd2a89fcc95bee70b97f3f0ec5203458453d5be" exitCode=0 Nov 21 19:20:46 crc kubenswrapper[4701]: I1121 19:20:46.913822 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffb1df83-0092-42e4-885f-e934786a503b","Type":"ContainerDied","Data":"cc74f4c171621f6e28d86021d565279b267b339adb7957e0da0213b2df8b7d27"} Nov 21 19:20:46 crc kubenswrapper[4701]: I1121 19:20:46.913860 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffb1df83-0092-42e4-885f-e934786a503b","Type":"ContainerDied","Data":"bd9c874fdbe44d1bdec553997416a9c3856a7f215901e4170d1f23cddff5b52e"} Nov 21 19:20:46 crc kubenswrapper[4701]: I1121 19:20:46.913872 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffb1df83-0092-42e4-885f-e934786a503b","Type":"ContainerDied","Data":"b4dc83dd71334cd762e2fee8ecd2a89fcc95bee70b97f3f0ec5203458453d5be"} Nov 21 19:20:46 crc kubenswrapper[4701]: I1121 19:20:46.927347 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"07d6bc32-28a2-40bf-bd2e-9454dfaac91f","Type":"ContainerStarted","Data":"803760a56a2a27c3e07d50ac5e35e05f32afa6e79b2155c799be6eb22e06c580"} Nov 21 19:20:46 crc kubenswrapper[4701]: I1121 19:20:46.927389 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0" Nov 21 19:20:46 crc kubenswrapper[4701]: I1121 
19:20:46.952438 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=12.952417973 podStartE2EDuration="12.952417973s" podCreationTimestamp="2025-11-21 19:20:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:20:46.93550698 +0000 UTC m=+1137.720647007" watchObservedRunningTime="2025-11-21 19:20:46.952417973 +0000 UTC m=+1137.737557990" Nov 21 19:20:46 crc kubenswrapper[4701]: I1121 19:20:46.956232 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zxgtr\" (UniqueName: \"kubernetes.io/projected/3cabfe57-5d37-4a59-93e3-aac4836f7d2c-kube-api-access-zxgtr\") pod \"openstackclient\" (UID: \"3cabfe57-5d37-4a59-93e3-aac4836f7d2c\") " pod="openstack/openstackclient" Nov 21 19:20:46 crc kubenswrapper[4701]: I1121 19:20:46.956335 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/3cabfe57-5d37-4a59-93e3-aac4836f7d2c-openstack-config-secret\") pod \"openstackclient\" (UID: \"3cabfe57-5d37-4a59-93e3-aac4836f7d2c\") " pod="openstack/openstackclient" Nov 21 19:20:46 crc kubenswrapper[4701]: I1121 19:20:46.956361 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3cabfe57-5d37-4a59-93e3-aac4836f7d2c-combined-ca-bundle\") pod \"openstackclient\" (UID: \"3cabfe57-5d37-4a59-93e3-aac4836f7d2c\") " pod="openstack/openstackclient" Nov 21 19:20:46 crc kubenswrapper[4701]: I1121 19:20:46.956397 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/3cabfe57-5d37-4a59-93e3-aac4836f7d2c-openstack-config\") pod \"openstackclient\" (UID: \"3cabfe57-5d37-4a59-93e3-aac4836f7d2c\") " pod="openstack/openstackclient" Nov 21 19:20:47 crc kubenswrapper[4701]: I1121 19:20:47.040601 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-decision-engine-0" Nov 21 19:20:47 crc kubenswrapper[4701]: I1121 19:20:47.059082 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/3cabfe57-5d37-4a59-93e3-aac4836f7d2c-openstack-config-secret\") pod \"openstackclient\" (UID: \"3cabfe57-5d37-4a59-93e3-aac4836f7d2c\") " pod="openstack/openstackclient" Nov 21 19:20:47 crc kubenswrapper[4701]: I1121 19:20:47.059167 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3cabfe57-5d37-4a59-93e3-aac4836f7d2c-combined-ca-bundle\") pod \"openstackclient\" (UID: \"3cabfe57-5d37-4a59-93e3-aac4836f7d2c\") " pod="openstack/openstackclient" Nov 21 19:20:47 crc kubenswrapper[4701]: I1121 19:20:47.059255 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/3cabfe57-5d37-4a59-93e3-aac4836f7d2c-openstack-config\") pod \"openstackclient\" (UID: \"3cabfe57-5d37-4a59-93e3-aac4836f7d2c\") " pod="openstack/openstackclient" Nov 21 19:20:47 crc kubenswrapper[4701]: I1121 19:20:47.059380 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zxgtr\" 
(UniqueName: \"kubernetes.io/projected/3cabfe57-5d37-4a59-93e3-aac4836f7d2c-kube-api-access-zxgtr\") pod \"openstackclient\" (UID: \"3cabfe57-5d37-4a59-93e3-aac4836f7d2c\") " pod="openstack/openstackclient" Nov 21 19:20:47 crc kubenswrapper[4701]: I1121 19:20:47.064742 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/3cabfe57-5d37-4a59-93e3-aac4836f7d2c-openstack-config\") pod \"openstackclient\" (UID: \"3cabfe57-5d37-4a59-93e3-aac4836f7d2c\") " pod="openstack/openstackclient" Nov 21 19:20:47 crc kubenswrapper[4701]: I1121 19:20:47.067944 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/3cabfe57-5d37-4a59-93e3-aac4836f7d2c-openstack-config-secret\") pod \"openstackclient\" (UID: \"3cabfe57-5d37-4a59-93e3-aac4836f7d2c\") " pod="openstack/openstackclient" Nov 21 19:20:47 crc kubenswrapper[4701]: I1121 19:20:47.068844 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3cabfe57-5d37-4a59-93e3-aac4836f7d2c-combined-ca-bundle\") pod \"openstackclient\" (UID: \"3cabfe57-5d37-4a59-93e3-aac4836f7d2c\") " pod="openstack/openstackclient" Nov 21 19:20:47 crc kubenswrapper[4701]: I1121 19:20:47.089255 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zxgtr\" (UniqueName: \"kubernetes.io/projected/3cabfe57-5d37-4a59-93e3-aac4836f7d2c-kube-api-access-zxgtr\") pod \"openstackclient\" (UID: \"3cabfe57-5d37-4a59-93e3-aac4836f7d2c\") " pod="openstack/openstackclient" Nov 21 19:20:47 crc kubenswrapper[4701]: I1121 19:20:47.142124 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 21 19:20:47 crc kubenswrapper[4701]: I1121 19:20:47.571346 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 21 19:20:47 crc kubenswrapper[4701]: I1121 19:20:47.749080 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-645bbc848c-zl97c" podUID="b3255fe0-3c69-4ce9-a9e7-823c35dcebbe" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.164:5353: i/o timeout" Nov 21 19:20:47 crc kubenswrapper[4701]: I1121 19:20:47.891522 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 19:20:47 crc kubenswrapper[4701]: I1121 19:20:47.967441 4701 generic.go:334] "Generic (PLEG): container finished" podID="ffb1df83-0092-42e4-885f-e934786a503b" containerID="a57b2c46539d0f42fe1f128aed5b54f7ab4329c8a9a79057cdda67e3ad6eca93" exitCode=0 Nov 21 19:20:47 crc kubenswrapper[4701]: I1121 19:20:47.967631 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 21 19:20:47 crc kubenswrapper[4701]: I1121 19:20:47.974084 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"3cabfe57-5d37-4a59-93e3-aac4836f7d2c","Type":"ContainerStarted","Data":"95d190ecb4529a82699d42d08add2dfb01472dc85b6cd05f7aaba2aa50d96aba"} Nov 21 19:20:47 crc kubenswrapper[4701]: I1121 19:20:47.974146 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffb1df83-0092-42e4-885f-e934786a503b","Type":"ContainerDied","Data":"a57b2c46539d0f42fe1f128aed5b54f7ab4329c8a9a79057cdda67e3ad6eca93"} Nov 21 19:20:47 crc kubenswrapper[4701]: I1121 19:20:47.974174 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffb1df83-0092-42e4-885f-e934786a503b","Type":"ContainerDied","Data":"b80c501315d4edd818b2de94e8f88c34d23c8c03e0ec67294749c3b63c718398"} Nov 21 19:20:47 crc kubenswrapper[4701]: I1121 19:20:47.974219 4701 scope.go:117] "RemoveContainer" containerID="cc74f4c171621f6e28d86021d565279b267b339adb7957e0da0213b2df8b7d27" Nov 21 19:20:47 crc kubenswrapper[4701]: I1121 19:20:47.974582 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"07d6bc32-28a2-40bf-bd2e-9454dfaac91f","Type":"ContainerStarted","Data":"51cd7ff1d8dd2ee7688bd11d6def9bbfc0bf26e2008f3397f468491c8935dae4"} Nov 21 19:20:47 crc kubenswrapper[4701]: I1121 19:20:47.993831 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sr9pz\" (UniqueName: \"kubernetes.io/projected/ffb1df83-0092-42e4-885f-e934786a503b-kube-api-access-sr9pz\") pod \"ffb1df83-0092-42e4-885f-e934786a503b\" (UID: \"ffb1df83-0092-42e4-885f-e934786a503b\") " Nov 21 19:20:47 crc kubenswrapper[4701]: I1121 19:20:47.994043 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ffb1df83-0092-42e4-885f-e934786a503b-run-httpd\") pod \"ffb1df83-0092-42e4-885f-e934786a503b\" (UID: \"ffb1df83-0092-42e4-885f-e934786a503b\") " Nov 21 19:20:47 crc kubenswrapper[4701]: I1121 19:20:47.994213 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffb1df83-0092-42e4-885f-e934786a503b-combined-ca-bundle\") pod \"ffb1df83-0092-42e4-885f-e934786a503b\" (UID: \"ffb1df83-0092-42e4-885f-e934786a503b\") " Nov 21 19:20:47 crc kubenswrapper[4701]: I1121 19:20:47.994259 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ffb1df83-0092-42e4-885f-e934786a503b-sg-core-conf-yaml\") pod \"ffb1df83-0092-42e4-885f-e934786a503b\" (UID: \"ffb1df83-0092-42e4-885f-e934786a503b\") " Nov 21 19:20:47 crc kubenswrapper[4701]: I1121 19:20:47.994710 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ffb1df83-0092-42e4-885f-e934786a503b-scripts\") pod \"ffb1df83-0092-42e4-885f-e934786a503b\" (UID: \"ffb1df83-0092-42e4-885f-e934786a503b\") " Nov 21 19:20:47 crc kubenswrapper[4701]: I1121 19:20:47.994785 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffb1df83-0092-42e4-885f-e934786a503b-config-data\") pod \"ffb1df83-0092-42e4-885f-e934786a503b\" (UID: \"ffb1df83-0092-42e4-885f-e934786a503b\") " 
Nov 21 19:20:47 crc kubenswrapper[4701]: I1121 19:20:47.994808 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ffb1df83-0092-42e4-885f-e934786a503b-log-httpd\") pod \"ffb1df83-0092-42e4-885f-e934786a503b\" (UID: \"ffb1df83-0092-42e4-885f-e934786a503b\") " Nov 21 19:20:47 crc kubenswrapper[4701]: I1121 19:20:47.995277 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ffb1df83-0092-42e4-885f-e934786a503b-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "ffb1df83-0092-42e4-885f-e934786a503b" (UID: "ffb1df83-0092-42e4-885f-e934786a503b"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:20:47 crc kubenswrapper[4701]: I1121 19:20:47.997092 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ffb1df83-0092-42e4-885f-e934786a503b-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "ffb1df83-0092-42e4-885f-e934786a503b" (UID: "ffb1df83-0092-42e4-885f-e934786a503b"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.002213 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"4b72befc-936a-4833-8e7a-f765c655a300","Type":"ContainerStarted","Data":"b137fb7e253db687c0d74d681e18b448c2b99f12af937d094668a57d84b3d155"} Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.005639 4701 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ffb1df83-0092-42e4-885f-e934786a503b-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.012898 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffb1df83-0092-42e4-885f-e934786a503b-scripts" (OuterVolumeSpecName: "scripts") pod "ffb1df83-0092-42e4-885f-e934786a503b" (UID: "ffb1df83-0092-42e4-885f-e934786a503b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.020605 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=6.020572731 podStartE2EDuration="6.020572731s" podCreationTimestamp="2025-11-21 19:20:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:20:48.007622073 +0000 UTC m=+1138.792762100" watchObservedRunningTime="2025-11-21 19:20:48.020572731 +0000 UTC m=+1138.805712758" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.030441 4701 scope.go:117] "RemoveContainer" containerID="bd9c874fdbe44d1bdec553997416a9c3856a7f215901e4170d1f23cddff5b52e" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.051965 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ffb1df83-0092-42e4-885f-e934786a503b-kube-api-access-sr9pz" (OuterVolumeSpecName: "kube-api-access-sr9pz") pod "ffb1df83-0092-42e4-885f-e934786a503b" (UID: "ffb1df83-0092-42e4-885f-e934786a503b"). InnerVolumeSpecName "kube-api-access-sr9pz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.056583 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffb1df83-0092-42e4-885f-e934786a503b-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "ffb1df83-0092-42e4-885f-e934786a503b" (UID: "ffb1df83-0092-42e4-885f-e934786a503b"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.095430 4701 scope.go:117] "RemoveContainer" containerID="a57b2c46539d0f42fe1f128aed5b54f7ab4329c8a9a79057cdda67e3ad6eca93" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.114516 4701 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ffb1df83-0092-42e4-885f-e934786a503b-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.114554 4701 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ffb1df83-0092-42e4-885f-e934786a503b-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.114567 4701 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ffb1df83-0092-42e4-885f-e934786a503b-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.114578 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sr9pz\" (UniqueName: \"kubernetes.io/projected/ffb1df83-0092-42e4-885f-e934786a503b-kube-api-access-sr9pz\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.139008 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffb1df83-0092-42e4-885f-e934786a503b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ffb1df83-0092-42e4-885f-e934786a503b" (UID: "ffb1df83-0092-42e4-885f-e934786a503b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.165528 4701 scope.go:117] "RemoveContainer" containerID="b4dc83dd71334cd762e2fee8ecd2a89fcc95bee70b97f3f0ec5203458453d5be" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.202738 4701 scope.go:117] "RemoveContainer" containerID="cc74f4c171621f6e28d86021d565279b267b339adb7957e0da0213b2df8b7d27" Nov 21 19:20:48 crc kubenswrapper[4701]: E1121 19:20:48.204106 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cc74f4c171621f6e28d86021d565279b267b339adb7957e0da0213b2df8b7d27\": container with ID starting with cc74f4c171621f6e28d86021d565279b267b339adb7957e0da0213b2df8b7d27 not found: ID does not exist" containerID="cc74f4c171621f6e28d86021d565279b267b339adb7957e0da0213b2df8b7d27" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.204172 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cc74f4c171621f6e28d86021d565279b267b339adb7957e0da0213b2df8b7d27"} err="failed to get container status \"cc74f4c171621f6e28d86021d565279b267b339adb7957e0da0213b2df8b7d27\": rpc error: code = NotFound desc = could not find container \"cc74f4c171621f6e28d86021d565279b267b339adb7957e0da0213b2df8b7d27\": container with ID starting with cc74f4c171621f6e28d86021d565279b267b339adb7957e0da0213b2df8b7d27 not found: ID does not exist" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.204233 4701 scope.go:117] "RemoveContainer" containerID="bd9c874fdbe44d1bdec553997416a9c3856a7f215901e4170d1f23cddff5b52e" Nov 21 19:20:48 crc kubenswrapper[4701]: E1121 19:20:48.206393 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bd9c874fdbe44d1bdec553997416a9c3856a7f215901e4170d1f23cddff5b52e\": container with ID starting with bd9c874fdbe44d1bdec553997416a9c3856a7f215901e4170d1f23cddff5b52e not found: ID does not exist" containerID="bd9c874fdbe44d1bdec553997416a9c3856a7f215901e4170d1f23cddff5b52e" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.206448 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd9c874fdbe44d1bdec553997416a9c3856a7f215901e4170d1f23cddff5b52e"} err="failed to get container status \"bd9c874fdbe44d1bdec553997416a9c3856a7f215901e4170d1f23cddff5b52e\": rpc error: code = NotFound desc = could not find container \"bd9c874fdbe44d1bdec553997416a9c3856a7f215901e4170d1f23cddff5b52e\": container with ID starting with bd9c874fdbe44d1bdec553997416a9c3856a7f215901e4170d1f23cddff5b52e not found: ID does not exist" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.206483 4701 scope.go:117] "RemoveContainer" containerID="a57b2c46539d0f42fe1f128aed5b54f7ab4329c8a9a79057cdda67e3ad6eca93" Nov 21 19:20:48 crc kubenswrapper[4701]: E1121 19:20:48.207169 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a57b2c46539d0f42fe1f128aed5b54f7ab4329c8a9a79057cdda67e3ad6eca93\": container with ID starting with a57b2c46539d0f42fe1f128aed5b54f7ab4329c8a9a79057cdda67e3ad6eca93 not found: ID does not exist" containerID="a57b2c46539d0f42fe1f128aed5b54f7ab4329c8a9a79057cdda67e3ad6eca93" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.207213 4701 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"a57b2c46539d0f42fe1f128aed5b54f7ab4329c8a9a79057cdda67e3ad6eca93"} err="failed to get container status \"a57b2c46539d0f42fe1f128aed5b54f7ab4329c8a9a79057cdda67e3ad6eca93\": rpc error: code = NotFound desc = could not find container \"a57b2c46539d0f42fe1f128aed5b54f7ab4329c8a9a79057cdda67e3ad6eca93\": container with ID starting with a57b2c46539d0f42fe1f128aed5b54f7ab4329c8a9a79057cdda67e3ad6eca93 not found: ID does not exist" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.207230 4701 scope.go:117] "RemoveContainer" containerID="b4dc83dd71334cd762e2fee8ecd2a89fcc95bee70b97f3f0ec5203458453d5be" Nov 21 19:20:48 crc kubenswrapper[4701]: E1121 19:20:48.207960 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b4dc83dd71334cd762e2fee8ecd2a89fcc95bee70b97f3f0ec5203458453d5be\": container with ID starting with b4dc83dd71334cd762e2fee8ecd2a89fcc95bee70b97f3f0ec5203458453d5be not found: ID does not exist" containerID="b4dc83dd71334cd762e2fee8ecd2a89fcc95bee70b97f3f0ec5203458453d5be" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.207987 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b4dc83dd71334cd762e2fee8ecd2a89fcc95bee70b97f3f0ec5203458453d5be"} err="failed to get container status \"b4dc83dd71334cd762e2fee8ecd2a89fcc95bee70b97f3f0ec5203458453d5be\": rpc error: code = NotFound desc = could not find container \"b4dc83dd71334cd762e2fee8ecd2a89fcc95bee70b97f3f0ec5203458453d5be\": container with ID starting with b4dc83dd71334cd762e2fee8ecd2a89fcc95bee70b97f3f0ec5203458453d5be not found: ID does not exist" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.212072 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffb1df83-0092-42e4-885f-e934786a503b-config-data" (OuterVolumeSpecName: "config-data") pod "ffb1df83-0092-42e4-885f-e934786a503b" (UID: "ffb1df83-0092-42e4-885f-e934786a503b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.217299 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffb1df83-0092-42e4-885f-e934786a503b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.217336 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffb1df83-0092-42e4-885f-e934786a503b-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.383175 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.392523 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.405929 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:20:48 crc kubenswrapper[4701]: E1121 19:20:48.406385 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffb1df83-0092-42e4-885f-e934786a503b" containerName="sg-core" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.406404 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffb1df83-0092-42e4-885f-e934786a503b" containerName="sg-core" Nov 21 19:20:48 crc kubenswrapper[4701]: E1121 19:20:48.406422 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffb1df83-0092-42e4-885f-e934786a503b" containerName="proxy-httpd" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.406430 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffb1df83-0092-42e4-885f-e934786a503b" containerName="proxy-httpd" Nov 21 19:20:48 crc kubenswrapper[4701]: E1121 19:20:48.406459 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffb1df83-0092-42e4-885f-e934786a503b" containerName="ceilometer-central-agent" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.406466 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffb1df83-0092-42e4-885f-e934786a503b" containerName="ceilometer-central-agent" Nov 21 19:20:48 crc kubenswrapper[4701]: E1121 19:20:48.406485 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffb1df83-0092-42e4-885f-e934786a503b" containerName="ceilometer-notification-agent" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.406490 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffb1df83-0092-42e4-885f-e934786a503b" containerName="ceilometer-notification-agent" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.406656 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffb1df83-0092-42e4-885f-e934786a503b" containerName="proxy-httpd" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.406684 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffb1df83-0092-42e4-885f-e934786a503b" containerName="sg-core" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.406698 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffb1df83-0092-42e4-885f-e934786a503b" containerName="ceilometer-notification-agent" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.406710 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffb1df83-0092-42e4-885f-e934786a503b" containerName="ceilometer-central-agent" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.410170 4701 util.go:30] "No sandbox for 
pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.416943 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.417125 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.430486 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.525740 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-scripts\") pod \"ceilometer-0\" (UID: \"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3\") " pod="openstack/ceilometer-0" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.525802 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-run-httpd\") pod \"ceilometer-0\" (UID: \"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3\") " pod="openstack/ceilometer-0" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.525836 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3\") " pod="openstack/ceilometer-0" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.525862 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-config-data\") pod \"ceilometer-0\" (UID: \"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3\") " pod="openstack/ceilometer-0" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.525924 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-log-httpd\") pod \"ceilometer-0\" (UID: \"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3\") " pod="openstack/ceilometer-0" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.526007 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-78pc9\" (UniqueName: \"kubernetes.io/projected/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-kube-api-access-78pc9\") pod \"ceilometer-0\" (UID: \"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3\") " pod="openstack/ceilometer-0" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.526071 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3\") " pod="openstack/ceilometer-0" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.613520 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 19:20:48 crc kubenswrapper[4701]: 
I1121 19:20:48.613614 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.628943 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-78pc9\" (UniqueName: \"kubernetes.io/projected/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-kube-api-access-78pc9\") pod \"ceilometer-0\" (UID: \"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3\") " pod="openstack/ceilometer-0" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.629036 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3\") " pod="openstack/ceilometer-0" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.629131 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-scripts\") pod \"ceilometer-0\" (UID: \"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3\") " pod="openstack/ceilometer-0" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.629159 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-run-httpd\") pod \"ceilometer-0\" (UID: \"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3\") " pod="openstack/ceilometer-0" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.629183 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3\") " pod="openstack/ceilometer-0" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.629230 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-config-data\") pod \"ceilometer-0\" (UID: \"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3\") " pod="openstack/ceilometer-0" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.629261 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-log-httpd\") pod \"ceilometer-0\" (UID: \"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3\") " pod="openstack/ceilometer-0" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.629753 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-log-httpd\") pod \"ceilometer-0\" (UID: \"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3\") " pod="openstack/ceilometer-0" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.630630 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-run-httpd\") pod \"ceilometer-0\" (UID: \"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3\") " pod="openstack/ceilometer-0" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 
19:20:48.637187 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-config-data\") pod \"ceilometer-0\" (UID: \"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3\") " pod="openstack/ceilometer-0" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.638296 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5c5f685fb-t5wpk" podUID="f658fc90-2c53-4cdd-b411-16ccb58f7625" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.172:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.638392 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5c5f685fb-t5wpk" podUID="f658fc90-2c53-4cdd-b411-16ccb58f7625" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.172:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.639053 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3\") " pod="openstack/ceilometer-0" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.640904 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3\") " pod="openstack/ceilometer-0" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.653863 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-scripts\") pod \"ceilometer-0\" (UID: \"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3\") " pod="openstack/ceilometer-0" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.654412 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-78pc9\" (UniqueName: \"kubernetes.io/projected/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-kube-api-access-78pc9\") pod \"ceilometer-0\" (UID: \"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3\") " pod="openstack/ceilometer-0" Nov 21 19:20:48 crc kubenswrapper[4701]: I1121 19:20:48.791443 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 21 19:20:49 crc kubenswrapper[4701]: I1121 19:20:49.024859 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"4b72befc-936a-4833-8e7a-f765c655a300","Type":"ContainerStarted","Data":"f2cd5c794e0a46e020429e8b5c3e9462953feeae6977f6eb4f3daee36455f35f"} Nov 21 19:20:49 crc kubenswrapper[4701]: I1121 19:20:49.056093 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=5.056074142 podStartE2EDuration="5.056074142s" podCreationTimestamp="2025-11-21 19:20:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:20:49.048248633 +0000 UTC m=+1139.833388660" watchObservedRunningTime="2025-11-21 19:20:49.056074142 +0000 UTC m=+1139.841214169" Nov 21 19:20:49 crc kubenswrapper[4701]: I1121 19:20:49.370827 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:20:49 crc kubenswrapper[4701]: I1121 19:20:49.997501 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ffb1df83-0092-42e4-885f-e934786a503b" path="/var/lib/kubelet/pods/ffb1df83-0092-42e4-885f-e934786a503b/volumes" Nov 21 19:20:50 crc kubenswrapper[4701]: I1121 19:20:50.058616 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3","Type":"ContainerStarted","Data":"474e911f407d22c07404d4e69d719852640288b52eb303b73920620acb6525d8"} Nov 21 19:20:50 crc kubenswrapper[4701]: I1121 19:20:50.058663 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3","Type":"ContainerStarted","Data":"06e89e7d0494414848945b951e909773941a341099578ae5549553c376e7b1f5"} Nov 21 19:20:50 crc kubenswrapper[4701]: I1121 19:20:50.058675 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3","Type":"ContainerStarted","Data":"657ffd97c0d2b68e57ba2428f9ac092ac53b3d1b69a71c0fb584c8e545192e37"} Nov 21 19:20:50 crc kubenswrapper[4701]: I1121 19:20:50.318008 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 21 19:20:51 crc kubenswrapper[4701]: I1121 19:20:51.079888 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3","Type":"ContainerStarted","Data":"f3add8d4bff89e836b2fd97cf25939490c4102f39704e0daad28292181145d83"} Nov 21 19:20:51 crc kubenswrapper[4701]: I1121 19:20:51.083131 4701 generic.go:334] "Generic (PLEG): container finished" podID="8a7a5be4-96a4-4574-9839-2d0576595305" containerID="a0ec6d1a2fc828c1e5eac769e653fa0e9805be850a5d554ea13660925e01ccf6" exitCode=1 Nov 21 19:20:51 crc kubenswrapper[4701]: I1121 19:20:51.083977 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"8a7a5be4-96a4-4574-9839-2d0576595305","Type":"ContainerDied","Data":"a0ec6d1a2fc828c1e5eac769e653fa0e9805be850a5d554ea13660925e01ccf6"} Nov 21 19:20:51 crc kubenswrapper[4701]: I1121 19:20:51.084023 4701 scope.go:117] "RemoveContainer" containerID="ee2807df27d43a686f08c63a0b57e104222964e1ac803446173636918d039a8b" Nov 21 19:20:51 crc kubenswrapper[4701]: I1121 19:20:51.084347 4701 scope.go:117] "RemoveContainer" 
containerID="a0ec6d1a2fc828c1e5eac769e653fa0e9805be850a5d554ea13660925e01ccf6" Nov 21 19:20:51 crc kubenswrapper[4701]: E1121 19:20:51.084541 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 20s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(8a7a5be4-96a4-4574-9839-2d0576595305)\"" pod="openstack/watcher-decision-engine-0" podUID="8a7a5be4-96a4-4574-9839-2d0576595305" Nov 21 19:20:53 crc kubenswrapper[4701]: I1121 19:20:53.108221 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3","Type":"ContainerStarted","Data":"5c378b50c25aeb0425b035b32f243b229d7fd8cad7ccf1734406be38c6d7afca"} Nov 21 19:20:53 crc kubenswrapper[4701]: I1121 19:20:53.108779 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 21 19:20:53 crc kubenswrapper[4701]: I1121 19:20:53.134353 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.657364022 podStartE2EDuration="5.134333859s" podCreationTimestamp="2025-11-21 19:20:48 +0000 UTC" firstStartedPulling="2025-11-21 19:20:49.389135721 +0000 UTC m=+1140.174275768" lastFinishedPulling="2025-11-21 19:20:51.866105578 +0000 UTC m=+1142.651245605" observedRunningTime="2025-11-21 19:20:53.133405075 +0000 UTC m=+1143.918545102" watchObservedRunningTime="2025-11-21 19:20:53.134333859 +0000 UTC m=+1143.919473886" Nov 21 19:20:53 crc kubenswrapper[4701]: I1121 19:20:53.317370 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 21 19:20:53 crc kubenswrapper[4701]: I1121 19:20:53.317435 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 21 19:20:53 crc kubenswrapper[4701]: I1121 19:20:53.373862 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 21 19:20:53 crc kubenswrapper[4701]: I1121 19:20:53.392739 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 21 19:20:53 crc kubenswrapper[4701]: I1121 19:20:53.411714 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-66cbbc6b59-4jhxd"] Nov 21 19:20:53 crc kubenswrapper[4701]: I1121 19:20:53.413809 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-66cbbc6b59-4jhxd" Nov 21 19:20:53 crc kubenswrapper[4701]: I1121 19:20:53.416236 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Nov 21 19:20:53 crc kubenswrapper[4701]: I1121 19:20:53.416484 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Nov 21 19:20:53 crc kubenswrapper[4701]: I1121 19:20:53.417110 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 21 19:20:53 crc kubenswrapper[4701]: I1121 19:20:53.464831 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-66cbbc6b59-4jhxd"] Nov 21 19:20:53 crc kubenswrapper[4701]: I1121 19:20:53.555760 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/567ed826-1db0-4018-b4ea-8af42596aa3e-run-httpd\") pod \"swift-proxy-66cbbc6b59-4jhxd\" (UID: \"567ed826-1db0-4018-b4ea-8af42596aa3e\") " pod="openstack/swift-proxy-66cbbc6b59-4jhxd" Nov 21 19:20:53 crc kubenswrapper[4701]: I1121 19:20:53.555810 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/567ed826-1db0-4018-b4ea-8af42596aa3e-log-httpd\") pod \"swift-proxy-66cbbc6b59-4jhxd\" (UID: \"567ed826-1db0-4018-b4ea-8af42596aa3e\") " pod="openstack/swift-proxy-66cbbc6b59-4jhxd" Nov 21 19:20:53 crc kubenswrapper[4701]: I1121 19:20:53.555868 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/567ed826-1db0-4018-b4ea-8af42596aa3e-internal-tls-certs\") pod \"swift-proxy-66cbbc6b59-4jhxd\" (UID: \"567ed826-1db0-4018-b4ea-8af42596aa3e\") " pod="openstack/swift-proxy-66cbbc6b59-4jhxd" Nov 21 19:20:53 crc kubenswrapper[4701]: I1121 19:20:53.555908 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/567ed826-1db0-4018-b4ea-8af42596aa3e-public-tls-certs\") pod \"swift-proxy-66cbbc6b59-4jhxd\" (UID: \"567ed826-1db0-4018-b4ea-8af42596aa3e\") " pod="openstack/swift-proxy-66cbbc6b59-4jhxd" Nov 21 19:20:53 crc kubenswrapper[4701]: I1121 19:20:53.555929 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gh2cn\" (UniqueName: \"kubernetes.io/projected/567ed826-1db0-4018-b4ea-8af42596aa3e-kube-api-access-gh2cn\") pod \"swift-proxy-66cbbc6b59-4jhxd\" (UID: \"567ed826-1db0-4018-b4ea-8af42596aa3e\") " pod="openstack/swift-proxy-66cbbc6b59-4jhxd" Nov 21 19:20:53 crc kubenswrapper[4701]: I1121 19:20:53.556022 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/567ed826-1db0-4018-b4ea-8af42596aa3e-combined-ca-bundle\") pod \"swift-proxy-66cbbc6b59-4jhxd\" (UID: \"567ed826-1db0-4018-b4ea-8af42596aa3e\") " pod="openstack/swift-proxy-66cbbc6b59-4jhxd" Nov 21 19:20:53 crc kubenswrapper[4701]: I1121 19:20:53.556060 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/567ed826-1db0-4018-b4ea-8af42596aa3e-etc-swift\") pod \"swift-proxy-66cbbc6b59-4jhxd\" (UID: \"567ed826-1db0-4018-b4ea-8af42596aa3e\") " 
pod="openstack/swift-proxy-66cbbc6b59-4jhxd" Nov 21 19:20:53 crc kubenswrapper[4701]: I1121 19:20:53.556088 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/567ed826-1db0-4018-b4ea-8af42596aa3e-config-data\") pod \"swift-proxy-66cbbc6b59-4jhxd\" (UID: \"567ed826-1db0-4018-b4ea-8af42596aa3e\") " pod="openstack/swift-proxy-66cbbc6b59-4jhxd" Nov 21 19:20:53 crc kubenswrapper[4701]: I1121 19:20:53.660122 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/567ed826-1db0-4018-b4ea-8af42596aa3e-combined-ca-bundle\") pod \"swift-proxy-66cbbc6b59-4jhxd\" (UID: \"567ed826-1db0-4018-b4ea-8af42596aa3e\") " pod="openstack/swift-proxy-66cbbc6b59-4jhxd" Nov 21 19:20:53 crc kubenswrapper[4701]: I1121 19:20:53.660220 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/567ed826-1db0-4018-b4ea-8af42596aa3e-etc-swift\") pod \"swift-proxy-66cbbc6b59-4jhxd\" (UID: \"567ed826-1db0-4018-b4ea-8af42596aa3e\") " pod="openstack/swift-proxy-66cbbc6b59-4jhxd" Nov 21 19:20:53 crc kubenswrapper[4701]: I1121 19:20:53.660260 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/567ed826-1db0-4018-b4ea-8af42596aa3e-config-data\") pod \"swift-proxy-66cbbc6b59-4jhxd\" (UID: \"567ed826-1db0-4018-b4ea-8af42596aa3e\") " pod="openstack/swift-proxy-66cbbc6b59-4jhxd" Nov 21 19:20:53 crc kubenswrapper[4701]: I1121 19:20:53.660341 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/567ed826-1db0-4018-b4ea-8af42596aa3e-run-httpd\") pod \"swift-proxy-66cbbc6b59-4jhxd\" (UID: \"567ed826-1db0-4018-b4ea-8af42596aa3e\") " pod="openstack/swift-proxy-66cbbc6b59-4jhxd" Nov 21 19:20:53 crc kubenswrapper[4701]: I1121 19:20:53.660367 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/567ed826-1db0-4018-b4ea-8af42596aa3e-log-httpd\") pod \"swift-proxy-66cbbc6b59-4jhxd\" (UID: \"567ed826-1db0-4018-b4ea-8af42596aa3e\") " pod="openstack/swift-proxy-66cbbc6b59-4jhxd" Nov 21 19:20:53 crc kubenswrapper[4701]: I1121 19:20:53.660430 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/567ed826-1db0-4018-b4ea-8af42596aa3e-internal-tls-certs\") pod \"swift-proxy-66cbbc6b59-4jhxd\" (UID: \"567ed826-1db0-4018-b4ea-8af42596aa3e\") " pod="openstack/swift-proxy-66cbbc6b59-4jhxd" Nov 21 19:20:53 crc kubenswrapper[4701]: I1121 19:20:53.660452 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/567ed826-1db0-4018-b4ea-8af42596aa3e-public-tls-certs\") pod \"swift-proxy-66cbbc6b59-4jhxd\" (UID: \"567ed826-1db0-4018-b4ea-8af42596aa3e\") " pod="openstack/swift-proxy-66cbbc6b59-4jhxd" Nov 21 19:20:53 crc kubenswrapper[4701]: I1121 19:20:53.660471 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gh2cn\" (UniqueName: \"kubernetes.io/projected/567ed826-1db0-4018-b4ea-8af42596aa3e-kube-api-access-gh2cn\") pod \"swift-proxy-66cbbc6b59-4jhxd\" (UID: \"567ed826-1db0-4018-b4ea-8af42596aa3e\") " pod="openstack/swift-proxy-66cbbc6b59-4jhxd" Nov 21 
19:20:53 crc kubenswrapper[4701]: I1121 19:20:53.662793 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/567ed826-1db0-4018-b4ea-8af42596aa3e-run-httpd\") pod \"swift-proxy-66cbbc6b59-4jhxd\" (UID: \"567ed826-1db0-4018-b4ea-8af42596aa3e\") " pod="openstack/swift-proxy-66cbbc6b59-4jhxd" Nov 21 19:20:53 crc kubenswrapper[4701]: I1121 19:20:53.663451 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/567ed826-1db0-4018-b4ea-8af42596aa3e-log-httpd\") pod \"swift-proxy-66cbbc6b59-4jhxd\" (UID: \"567ed826-1db0-4018-b4ea-8af42596aa3e\") " pod="openstack/swift-proxy-66cbbc6b59-4jhxd" Nov 21 19:20:53 crc kubenswrapper[4701]: I1121 19:20:53.669518 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/567ed826-1db0-4018-b4ea-8af42596aa3e-config-data\") pod \"swift-proxy-66cbbc6b59-4jhxd\" (UID: \"567ed826-1db0-4018-b4ea-8af42596aa3e\") " pod="openstack/swift-proxy-66cbbc6b59-4jhxd" Nov 21 19:20:53 crc kubenswrapper[4701]: I1121 19:20:53.671001 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/567ed826-1db0-4018-b4ea-8af42596aa3e-internal-tls-certs\") pod \"swift-proxy-66cbbc6b59-4jhxd\" (UID: \"567ed826-1db0-4018-b4ea-8af42596aa3e\") " pod="openstack/swift-proxy-66cbbc6b59-4jhxd" Nov 21 19:20:53 crc kubenswrapper[4701]: I1121 19:20:53.685185 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/567ed826-1db0-4018-b4ea-8af42596aa3e-combined-ca-bundle\") pod \"swift-proxy-66cbbc6b59-4jhxd\" (UID: \"567ed826-1db0-4018-b4ea-8af42596aa3e\") " pod="openstack/swift-proxy-66cbbc6b59-4jhxd" Nov 21 19:20:53 crc kubenswrapper[4701]: I1121 19:20:53.690617 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/567ed826-1db0-4018-b4ea-8af42596aa3e-etc-swift\") pod \"swift-proxy-66cbbc6b59-4jhxd\" (UID: \"567ed826-1db0-4018-b4ea-8af42596aa3e\") " pod="openstack/swift-proxy-66cbbc6b59-4jhxd" Nov 21 19:20:53 crc kubenswrapper[4701]: I1121 19:20:53.691551 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/567ed826-1db0-4018-b4ea-8af42596aa3e-public-tls-certs\") pod \"swift-proxy-66cbbc6b59-4jhxd\" (UID: \"567ed826-1db0-4018-b4ea-8af42596aa3e\") " pod="openstack/swift-proxy-66cbbc6b59-4jhxd" Nov 21 19:20:53 crc kubenswrapper[4701]: I1121 19:20:53.694942 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gh2cn\" (UniqueName: \"kubernetes.io/projected/567ed826-1db0-4018-b4ea-8af42596aa3e-kube-api-access-gh2cn\") pod \"swift-proxy-66cbbc6b59-4jhxd\" (UID: \"567ed826-1db0-4018-b4ea-8af42596aa3e\") " pod="openstack/swift-proxy-66cbbc6b59-4jhxd" Nov 21 19:20:53 crc kubenswrapper[4701]: I1121 19:20:53.735230 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-66cbbc6b59-4jhxd" Nov 21 19:20:54 crc kubenswrapper[4701]: I1121 19:20:54.116788 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 21 19:20:54 crc kubenswrapper[4701]: I1121 19:20:54.117338 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 21 19:20:54 crc kubenswrapper[4701]: I1121 19:20:54.465612 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-66cbbc6b59-4jhxd"] Nov 21 19:20:54 crc kubenswrapper[4701]: I1121 19:20:54.529972 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:20:54 crc kubenswrapper[4701]: I1121 19:20:54.956569 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 21 19:20:54 crc kubenswrapper[4701]: I1121 19:20:54.956628 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 21 19:20:55 crc kubenswrapper[4701]: I1121 19:20:55.007010 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 21 19:20:55 crc kubenswrapper[4701]: I1121 19:20:55.014334 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 21 19:20:55 crc kubenswrapper[4701]: I1121 19:20:55.142995 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f8fa0f18-f3b5-4a0d-a709-190d24e34ed3" containerName="ceilometer-central-agent" containerID="cri-o://06e89e7d0494414848945b951e909773941a341099578ae5549553c376e7b1f5" gracePeriod=30 Nov 21 19:20:55 crc kubenswrapper[4701]: I1121 19:20:55.143544 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f8fa0f18-f3b5-4a0d-a709-190d24e34ed3" containerName="sg-core" containerID="cri-o://f3add8d4bff89e836b2fd97cf25939490c4102f39704e0daad28292181145d83" gracePeriod=30 Nov 21 19:20:55 crc kubenswrapper[4701]: I1121 19:20:55.143689 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f8fa0f18-f3b5-4a0d-a709-190d24e34ed3" containerName="proxy-httpd" containerID="cri-o://5c378b50c25aeb0425b035b32f243b229d7fd8cad7ccf1734406be38c6d7afca" gracePeriod=30 Nov 21 19:20:55 crc kubenswrapper[4701]: I1121 19:20:55.143758 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f8fa0f18-f3b5-4a0d-a709-190d24e34ed3" containerName="ceilometer-notification-agent" containerID="cri-o://474e911f407d22c07404d4e69d719852640288b52eb303b73920620acb6525d8" gracePeriod=30 Nov 21 19:20:55 crc kubenswrapper[4701]: I1121 19:20:55.148237 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 21 19:20:55 crc kubenswrapper[4701]: I1121 19:20:55.148289 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 21 19:20:55 crc kubenswrapper[4701]: I1121 19:20:55.513650 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 21 19:20:55 crc kubenswrapper[4701]: I1121 19:20:55.670833 4701 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openstack/horizon-869574dbc6-l96tx" podUID="1c543587-173c-4fb2-b730-72b848f845d6" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.158:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.158:8443: connect: connection refused" Nov 21 19:20:56 crc kubenswrapper[4701]: I1121 19:20:56.161457 4701 generic.go:334] "Generic (PLEG): container finished" podID="f8fa0f18-f3b5-4a0d-a709-190d24e34ed3" containerID="5c378b50c25aeb0425b035b32f243b229d7fd8cad7ccf1734406be38c6d7afca" exitCode=0 Nov 21 19:20:56 crc kubenswrapper[4701]: I1121 19:20:56.161493 4701 generic.go:334] "Generic (PLEG): container finished" podID="f8fa0f18-f3b5-4a0d-a709-190d24e34ed3" containerID="f3add8d4bff89e836b2fd97cf25939490c4102f39704e0daad28292181145d83" exitCode=2 Nov 21 19:20:56 crc kubenswrapper[4701]: I1121 19:20:56.161504 4701 generic.go:334] "Generic (PLEG): container finished" podID="f8fa0f18-f3b5-4a0d-a709-190d24e34ed3" containerID="474e911f407d22c07404d4e69d719852640288b52eb303b73920620acb6525d8" exitCode=0 Nov 21 19:20:56 crc kubenswrapper[4701]: I1121 19:20:56.161545 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3","Type":"ContainerDied","Data":"5c378b50c25aeb0425b035b32f243b229d7fd8cad7ccf1734406be38c6d7afca"} Nov 21 19:20:56 crc kubenswrapper[4701]: I1121 19:20:56.161613 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3","Type":"ContainerDied","Data":"f3add8d4bff89e836b2fd97cf25939490c4102f39704e0daad28292181145d83"} Nov 21 19:20:56 crc kubenswrapper[4701]: I1121 19:20:56.161628 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3","Type":"ContainerDied","Data":"474e911f407d22c07404d4e69d719852640288b52eb303b73920620acb6525d8"} Nov 21 19:20:56 crc kubenswrapper[4701]: I1121 19:20:56.161814 4701 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 21 19:20:56 crc kubenswrapper[4701]: I1121 19:20:56.161828 4701 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 21 19:20:56 crc kubenswrapper[4701]: I1121 19:20:56.482043 4701 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-decision-engine-0" Nov 21 19:20:56 crc kubenswrapper[4701]: I1121 19:20:56.482804 4701 scope.go:117] "RemoveContainer" containerID="a0ec6d1a2fc828c1e5eac769e653fa0e9805be850a5d554ea13660925e01ccf6" Nov 21 19:20:56 crc kubenswrapper[4701]: I1121 19:20:56.482857 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0" Nov 21 19:20:56 crc kubenswrapper[4701]: I1121 19:20:56.482886 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Nov 21 19:20:56 crc kubenswrapper[4701]: E1121 19:20:56.483165 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 20s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(8a7a5be4-96a4-4574-9839-2d0576595305)\"" pod="openstack/watcher-decision-engine-0" podUID="8a7a5be4-96a4-4574-9839-2d0576595305" Nov 21 19:20:57 crc kubenswrapper[4701]: I1121 19:20:57.175135 4701 generic.go:334] "Generic (PLEG): container finished" podID="f8fa0f18-f3b5-4a0d-a709-190d24e34ed3" 
containerID="06e89e7d0494414848945b951e909773941a341099578ae5549553c376e7b1f5" exitCode=0 Nov 21 19:20:57 crc kubenswrapper[4701]: I1121 19:20:57.175222 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3","Type":"ContainerDied","Data":"06e89e7d0494414848945b951e909773941a341099578ae5549553c376e7b1f5"} Nov 21 19:20:57 crc kubenswrapper[4701]: I1121 19:20:57.176358 4701 scope.go:117] "RemoveContainer" containerID="a0ec6d1a2fc828c1e5eac769e653fa0e9805be850a5d554ea13660925e01ccf6" Nov 21 19:20:57 crc kubenswrapper[4701]: E1121 19:20:57.176666 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 20s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(8a7a5be4-96a4-4574-9839-2d0576595305)\"" pod="openstack/watcher-decision-engine-0" podUID="8a7a5be4-96a4-4574-9839-2d0576595305" Nov 21 19:20:58 crc kubenswrapper[4701]: I1121 19:20:58.000556 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 21 19:20:58 crc kubenswrapper[4701]: I1121 19:20:58.000911 4701 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 21 19:20:58 crc kubenswrapper[4701]: I1121 19:20:58.005625 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 21 19:20:58 crc kubenswrapper[4701]: I1121 19:20:58.005709 4701 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 21 19:20:58 crc kubenswrapper[4701]: I1121 19:20:58.006337 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 21 19:20:58 crc kubenswrapper[4701]: I1121 19:20:58.007971 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.328964 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-xvfz2"] Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.330790 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-xvfz2" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.353614 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-xvfz2"] Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.384630 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cf31154f-0e27-4b6c-889c-608d4a6aaf41-operator-scripts\") pod \"nova-api-db-create-xvfz2\" (UID: \"cf31154f-0e27-4b6c-889c-608d4a6aaf41\") " pod="openstack/nova-api-db-create-xvfz2" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.384735 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wwnbx\" (UniqueName: \"kubernetes.io/projected/cf31154f-0e27-4b6c-889c-608d4a6aaf41-kube-api-access-wwnbx\") pod \"nova-api-db-create-xvfz2\" (UID: \"cf31154f-0e27-4b6c-889c-608d4a6aaf41\") " pod="openstack/nova-api-db-create-xvfz2" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.486771 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cf31154f-0e27-4b6c-889c-608d4a6aaf41-operator-scripts\") pod \"nova-api-db-create-xvfz2\" (UID: \"cf31154f-0e27-4b6c-889c-608d4a6aaf41\") " pod="openstack/nova-api-db-create-xvfz2" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.486832 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wwnbx\" (UniqueName: \"kubernetes.io/projected/cf31154f-0e27-4b6c-889c-608d4a6aaf41-kube-api-access-wwnbx\") pod \"nova-api-db-create-xvfz2\" (UID: \"cf31154f-0e27-4b6c-889c-608d4a6aaf41\") " pod="openstack/nova-api-db-create-xvfz2" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.488153 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cf31154f-0e27-4b6c-889c-608d4a6aaf41-operator-scripts\") pod \"nova-api-db-create-xvfz2\" (UID: \"cf31154f-0e27-4b6c-889c-608d4a6aaf41\") " pod="openstack/nova-api-db-create-xvfz2" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.533101 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-g5rt5"] Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.534876 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-g5rt5" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.535914 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wwnbx\" (UniqueName: \"kubernetes.io/projected/cf31154f-0e27-4b6c-889c-608d4a6aaf41-kube-api-access-wwnbx\") pod \"nova-api-db-create-xvfz2\" (UID: \"cf31154f-0e27-4b6c-889c-608d4a6aaf41\") " pod="openstack/nova-api-db-create-xvfz2" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.542211 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-054e-account-create-4x7l8"] Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.543748 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-054e-account-create-4x7l8" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.546542 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.552806 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-g5rt5"] Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.562188 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-054e-account-create-4x7l8"] Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.589950 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hsjvk\" (UniqueName: \"kubernetes.io/projected/0eb288c4-5e71-4918-b69c-918bc5fa4bee-kube-api-access-hsjvk\") pod \"nova-api-054e-account-create-4x7l8\" (UID: \"0eb288c4-5e71-4918-b69c-918bc5fa4bee\") " pod="openstack/nova-api-054e-account-create-4x7l8" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.590057 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9ptnt\" (UniqueName: \"kubernetes.io/projected/924bddf4-8ec7-4d35-b3ec-1bf4ff8b5502-kube-api-access-9ptnt\") pod \"nova-cell0-db-create-g5rt5\" (UID: \"924bddf4-8ec7-4d35-b3ec-1bf4ff8b5502\") " pod="openstack/nova-cell0-db-create-g5rt5" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.590165 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0eb288c4-5e71-4918-b69c-918bc5fa4bee-operator-scripts\") pod \"nova-api-054e-account-create-4x7l8\" (UID: \"0eb288c4-5e71-4918-b69c-918bc5fa4bee\") " pod="openstack/nova-api-054e-account-create-4x7l8" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.590248 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/924bddf4-8ec7-4d35-b3ec-1bf4ff8b5502-operator-scripts\") pod \"nova-cell0-db-create-g5rt5\" (UID: \"924bddf4-8ec7-4d35-b3ec-1bf4ff8b5502\") " pod="openstack/nova-cell0-db-create-g5rt5" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.645618 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-xcwpl"] Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.647308 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-xcwpl" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.664032 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-xcwpl"] Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.692428 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ldcpg\" (UniqueName: \"kubernetes.io/projected/a7b15c44-532c-4df9-aaec-d0fee8594570-kube-api-access-ldcpg\") pod \"nova-cell1-db-create-xcwpl\" (UID: \"a7b15c44-532c-4df9-aaec-d0fee8594570\") " pod="openstack/nova-cell1-db-create-xcwpl" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.692491 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0eb288c4-5e71-4918-b69c-918bc5fa4bee-operator-scripts\") pod \"nova-api-054e-account-create-4x7l8\" (UID: \"0eb288c4-5e71-4918-b69c-918bc5fa4bee\") " pod="openstack/nova-api-054e-account-create-4x7l8" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.692557 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/924bddf4-8ec7-4d35-b3ec-1bf4ff8b5502-operator-scripts\") pod \"nova-cell0-db-create-g5rt5\" (UID: \"924bddf4-8ec7-4d35-b3ec-1bf4ff8b5502\") " pod="openstack/nova-cell0-db-create-g5rt5" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.693076 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a7b15c44-532c-4df9-aaec-d0fee8594570-operator-scripts\") pod \"nova-cell1-db-create-xcwpl\" (UID: \"a7b15c44-532c-4df9-aaec-d0fee8594570\") " pod="openstack/nova-cell1-db-create-xcwpl" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.693134 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hsjvk\" (UniqueName: \"kubernetes.io/projected/0eb288c4-5e71-4918-b69c-918bc5fa4bee-kube-api-access-hsjvk\") pod \"nova-api-054e-account-create-4x7l8\" (UID: \"0eb288c4-5e71-4918-b69c-918bc5fa4bee\") " pod="openstack/nova-api-054e-account-create-4x7l8" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.693399 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9ptnt\" (UniqueName: \"kubernetes.io/projected/924bddf4-8ec7-4d35-b3ec-1bf4ff8b5502-kube-api-access-9ptnt\") pod \"nova-cell0-db-create-g5rt5\" (UID: \"924bddf4-8ec7-4d35-b3ec-1bf4ff8b5502\") " pod="openstack/nova-cell0-db-create-g5rt5" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.694052 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0eb288c4-5e71-4918-b69c-918bc5fa4bee-operator-scripts\") pod \"nova-api-054e-account-create-4x7l8\" (UID: \"0eb288c4-5e71-4918-b69c-918bc5fa4bee\") " pod="openstack/nova-api-054e-account-create-4x7l8" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.694398 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/924bddf4-8ec7-4d35-b3ec-1bf4ff8b5502-operator-scripts\") pod \"nova-cell0-db-create-g5rt5\" (UID: \"924bddf4-8ec7-4d35-b3ec-1bf4ff8b5502\") " pod="openstack/nova-cell0-db-create-g5rt5" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.707329 4701 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openstack/nova-api-db-create-xvfz2" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.716493 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9ptnt\" (UniqueName: \"kubernetes.io/projected/924bddf4-8ec7-4d35-b3ec-1bf4ff8b5502-kube-api-access-9ptnt\") pod \"nova-cell0-db-create-g5rt5\" (UID: \"924bddf4-8ec7-4d35-b3ec-1bf4ff8b5502\") " pod="openstack/nova-cell0-db-create-g5rt5" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.726962 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-0ce8-account-create-pzhxk"] Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.727855 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hsjvk\" (UniqueName: \"kubernetes.io/projected/0eb288c4-5e71-4918-b69c-918bc5fa4bee-kube-api-access-hsjvk\") pod \"nova-api-054e-account-create-4x7l8\" (UID: \"0eb288c4-5e71-4918-b69c-918bc5fa4bee\") " pod="openstack/nova-api-054e-account-create-4x7l8" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.728900 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-0ce8-account-create-pzhxk" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.730558 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.743222 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-0ce8-account-create-pzhxk"] Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.796243 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a7b15c44-532c-4df9-aaec-d0fee8594570-operator-scripts\") pod \"nova-cell1-db-create-xcwpl\" (UID: \"a7b15c44-532c-4df9-aaec-d0fee8594570\") " pod="openstack/nova-cell1-db-create-xcwpl" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.796312 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b9e5386f-281c-4a6a-bbe4-0b1f15b82869-operator-scripts\") pod \"nova-cell0-0ce8-account-create-pzhxk\" (UID: \"b9e5386f-281c-4a6a-bbe4-0b1f15b82869\") " pod="openstack/nova-cell0-0ce8-account-create-pzhxk" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.796406 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h52gt\" (UniqueName: \"kubernetes.io/projected/b9e5386f-281c-4a6a-bbe4-0b1f15b82869-kube-api-access-h52gt\") pod \"nova-cell0-0ce8-account-create-pzhxk\" (UID: \"b9e5386f-281c-4a6a-bbe4-0b1f15b82869\") " pod="openstack/nova-cell0-0ce8-account-create-pzhxk" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.796459 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ldcpg\" (UniqueName: \"kubernetes.io/projected/a7b15c44-532c-4df9-aaec-d0fee8594570-kube-api-access-ldcpg\") pod \"nova-cell1-db-create-xcwpl\" (UID: \"a7b15c44-532c-4df9-aaec-d0fee8594570\") " pod="openstack/nova-cell1-db-create-xcwpl" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.797506 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a7b15c44-532c-4df9-aaec-d0fee8594570-operator-scripts\") pod \"nova-cell1-db-create-xcwpl\" (UID: 
\"a7b15c44-532c-4df9-aaec-d0fee8594570\") " pod="openstack/nova-cell1-db-create-xcwpl" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.816440 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ldcpg\" (UniqueName: \"kubernetes.io/projected/a7b15c44-532c-4df9-aaec-d0fee8594570-kube-api-access-ldcpg\") pod \"nova-cell1-db-create-xcwpl\" (UID: \"a7b15c44-532c-4df9-aaec-d0fee8594570\") " pod="openstack/nova-cell1-db-create-xcwpl" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.898281 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b9e5386f-281c-4a6a-bbe4-0b1f15b82869-operator-scripts\") pod \"nova-cell0-0ce8-account-create-pzhxk\" (UID: \"b9e5386f-281c-4a6a-bbe4-0b1f15b82869\") " pod="openstack/nova-cell0-0ce8-account-create-pzhxk" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.898718 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h52gt\" (UniqueName: \"kubernetes.io/projected/b9e5386f-281c-4a6a-bbe4-0b1f15b82869-kube-api-access-h52gt\") pod \"nova-cell0-0ce8-account-create-pzhxk\" (UID: \"b9e5386f-281c-4a6a-bbe4-0b1f15b82869\") " pod="openstack/nova-cell0-0ce8-account-create-pzhxk" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.899242 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b9e5386f-281c-4a6a-bbe4-0b1f15b82869-operator-scripts\") pod \"nova-cell0-0ce8-account-create-pzhxk\" (UID: \"b9e5386f-281c-4a6a-bbe4-0b1f15b82869\") " pod="openstack/nova-cell0-0ce8-account-create-pzhxk" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.917155 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-g5rt5" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.925584 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h52gt\" (UniqueName: \"kubernetes.io/projected/b9e5386f-281c-4a6a-bbe4-0b1f15b82869-kube-api-access-h52gt\") pod \"nova-cell0-0ce8-account-create-pzhxk\" (UID: \"b9e5386f-281c-4a6a-bbe4-0b1f15b82869\") " pod="openstack/nova-cell0-0ce8-account-create-pzhxk" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.936235 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-d84d-account-create-nx659"] Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.938278 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-d84d-account-create-nx659" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.941285 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.955017 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-054e-account-create-4x7l8" Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.975458 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-d84d-account-create-nx659"] Nov 21 19:21:01 crc kubenswrapper[4701]: I1121 19:21:01.979314 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-xcwpl" Nov 21 19:21:02 crc kubenswrapper[4701]: I1121 19:21:02.000867 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nrj95\" (UniqueName: \"kubernetes.io/projected/36850367-a3b5-4f05-9ac6-223e900ab01e-kube-api-access-nrj95\") pod \"nova-cell1-d84d-account-create-nx659\" (UID: \"36850367-a3b5-4f05-9ac6-223e900ab01e\") " pod="openstack/nova-cell1-d84d-account-create-nx659" Nov 21 19:21:02 crc kubenswrapper[4701]: I1121 19:21:02.001036 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/36850367-a3b5-4f05-9ac6-223e900ab01e-operator-scripts\") pod \"nova-cell1-d84d-account-create-nx659\" (UID: \"36850367-a3b5-4f05-9ac6-223e900ab01e\") " pod="openstack/nova-cell1-d84d-account-create-nx659" Nov 21 19:21:02 crc kubenswrapper[4701]: I1121 19:21:02.104635 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/36850367-a3b5-4f05-9ac6-223e900ab01e-operator-scripts\") pod \"nova-cell1-d84d-account-create-nx659\" (UID: \"36850367-a3b5-4f05-9ac6-223e900ab01e\") " pod="openstack/nova-cell1-d84d-account-create-nx659" Nov 21 19:21:02 crc kubenswrapper[4701]: I1121 19:21:02.105409 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nrj95\" (UniqueName: \"kubernetes.io/projected/36850367-a3b5-4f05-9ac6-223e900ab01e-kube-api-access-nrj95\") pod \"nova-cell1-d84d-account-create-nx659\" (UID: \"36850367-a3b5-4f05-9ac6-223e900ab01e\") " pod="openstack/nova-cell1-d84d-account-create-nx659" Nov 21 19:21:02 crc kubenswrapper[4701]: I1121 19:21:02.105300 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/36850367-a3b5-4f05-9ac6-223e900ab01e-operator-scripts\") pod \"nova-cell1-d84d-account-create-nx659\" (UID: \"36850367-a3b5-4f05-9ac6-223e900ab01e\") " pod="openstack/nova-cell1-d84d-account-create-nx659" Nov 21 19:21:02 crc kubenswrapper[4701]: I1121 19:21:02.114842 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-0ce8-account-create-pzhxk" Nov 21 19:21:02 crc kubenswrapper[4701]: I1121 19:21:02.124093 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nrj95\" (UniqueName: \"kubernetes.io/projected/36850367-a3b5-4f05-9ac6-223e900ab01e-kube-api-access-nrj95\") pod \"nova-cell1-d84d-account-create-nx659\" (UID: \"36850367-a3b5-4f05-9ac6-223e900ab01e\") " pod="openstack/nova-cell1-d84d-account-create-nx659" Nov 21 19:21:02 crc kubenswrapper[4701]: I1121 19:21:02.288703 4701 generic.go:334] "Generic (PLEG): container finished" podID="025b9b80-a0fc-4f59-b25b-d541738c8dfc" containerID="3b60c6eea761331e4a9cda0780211b6fe8bd85b1a6f04811e2ec82a1b5b3b868" exitCode=137 Nov 21 19:21:02 crc kubenswrapper[4701]: I1121 19:21:02.288755 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"025b9b80-a0fc-4f59-b25b-d541738c8dfc","Type":"ContainerDied","Data":"3b60c6eea761331e4a9cda0780211b6fe8bd85b1a6f04811e2ec82a1b5b3b868"} Nov 21 19:21:02 crc kubenswrapper[4701]: I1121 19:21:02.351865 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-d84d-account-create-nx659" Nov 21 19:21:03 crc kubenswrapper[4701]: I1121 19:21:03.848494 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 21 19:21:03 crc kubenswrapper[4701]: I1121 19:21:03.968944 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/025b9b80-a0fc-4f59-b25b-d541738c8dfc-scripts\") pod \"025b9b80-a0fc-4f59-b25b-d541738c8dfc\" (UID: \"025b9b80-a0fc-4f59-b25b-d541738c8dfc\") " Nov 21 19:21:03 crc kubenswrapper[4701]: I1121 19:21:03.969450 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/025b9b80-a0fc-4f59-b25b-d541738c8dfc-config-data\") pod \"025b9b80-a0fc-4f59-b25b-d541738c8dfc\" (UID: \"025b9b80-a0fc-4f59-b25b-d541738c8dfc\") " Nov 21 19:21:03 crc kubenswrapper[4701]: I1121 19:21:03.970141 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/025b9b80-a0fc-4f59-b25b-d541738c8dfc-combined-ca-bundle\") pod \"025b9b80-a0fc-4f59-b25b-d541738c8dfc\" (UID: \"025b9b80-a0fc-4f59-b25b-d541738c8dfc\") " Nov 21 19:21:03 crc kubenswrapper[4701]: I1121 19:21:03.970284 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/025b9b80-a0fc-4f59-b25b-d541738c8dfc-logs\") pod \"025b9b80-a0fc-4f59-b25b-d541738c8dfc\" (UID: \"025b9b80-a0fc-4f59-b25b-d541738c8dfc\") " Nov 21 19:21:03 crc kubenswrapper[4701]: I1121 19:21:03.970381 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lmplx\" (UniqueName: \"kubernetes.io/projected/025b9b80-a0fc-4f59-b25b-d541738c8dfc-kube-api-access-lmplx\") pod \"025b9b80-a0fc-4f59-b25b-d541738c8dfc\" (UID: \"025b9b80-a0fc-4f59-b25b-d541738c8dfc\") " Nov 21 19:21:03 crc kubenswrapper[4701]: I1121 19:21:03.970462 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/025b9b80-a0fc-4f59-b25b-d541738c8dfc-config-data-custom\") pod \"025b9b80-a0fc-4f59-b25b-d541738c8dfc\" (UID: \"025b9b80-a0fc-4f59-b25b-d541738c8dfc\") " Nov 21 19:21:03 crc kubenswrapper[4701]: I1121 19:21:03.970501 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/025b9b80-a0fc-4f59-b25b-d541738c8dfc-etc-machine-id\") pod \"025b9b80-a0fc-4f59-b25b-d541738c8dfc\" (UID: \"025b9b80-a0fc-4f59-b25b-d541738c8dfc\") " Nov 21 19:21:03 crc kubenswrapper[4701]: I1121 19:21:03.971087 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/025b9b80-a0fc-4f59-b25b-d541738c8dfc-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "025b9b80-a0fc-4f59-b25b-d541738c8dfc" (UID: "025b9b80-a0fc-4f59-b25b-d541738c8dfc"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 19:21:03 crc kubenswrapper[4701]: I1121 19:21:03.974283 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/025b9b80-a0fc-4f59-b25b-d541738c8dfc-logs" (OuterVolumeSpecName: "logs") pod "025b9b80-a0fc-4f59-b25b-d541738c8dfc" (UID: "025b9b80-a0fc-4f59-b25b-d541738c8dfc"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:21:03 crc kubenswrapper[4701]: I1121 19:21:03.996518 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/025b9b80-a0fc-4f59-b25b-d541738c8dfc-scripts" (OuterVolumeSpecName: "scripts") pod "025b9b80-a0fc-4f59-b25b-d541738c8dfc" (UID: "025b9b80-a0fc-4f59-b25b-d541738c8dfc"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:21:03 crc kubenswrapper[4701]: I1121 19:21:03.997421 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/025b9b80-a0fc-4f59-b25b-d541738c8dfc-kube-api-access-lmplx" (OuterVolumeSpecName: "kube-api-access-lmplx") pod "025b9b80-a0fc-4f59-b25b-d541738c8dfc" (UID: "025b9b80-a0fc-4f59-b25b-d541738c8dfc"). InnerVolumeSpecName "kube-api-access-lmplx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.000782 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/025b9b80-a0fc-4f59-b25b-d541738c8dfc-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "025b9b80-a0fc-4f59-b25b-d541738c8dfc" (UID: "025b9b80-a0fc-4f59-b25b-d541738c8dfc"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.046249 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.047537 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/025b9b80-a0fc-4f59-b25b-d541738c8dfc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "025b9b80-a0fc-4f59-b25b-d541738c8dfc" (UID: "025b9b80-a0fc-4f59-b25b-d541738c8dfc"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.073502 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lmplx\" (UniqueName: \"kubernetes.io/projected/025b9b80-a0fc-4f59-b25b-d541738c8dfc-kube-api-access-lmplx\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.073534 4701 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/025b9b80-a0fc-4f59-b25b-d541738c8dfc-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.073544 4701 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/025b9b80-a0fc-4f59-b25b-d541738c8dfc-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.073554 4701 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/025b9b80-a0fc-4f59-b25b-d541738c8dfc-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.073566 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/025b9b80-a0fc-4f59-b25b-d541738c8dfc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.073577 4701 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/025b9b80-a0fc-4f59-b25b-d541738c8dfc-logs\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.152042 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/025b9b80-a0fc-4f59-b25b-d541738c8dfc-config-data" (OuterVolumeSpecName: "config-data") pod "025b9b80-a0fc-4f59-b25b-d541738c8dfc" (UID: "025b9b80-a0fc-4f59-b25b-d541738c8dfc"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.174851 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-config-data\") pod \"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3\" (UID: \"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3\") " Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.174999 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-combined-ca-bundle\") pod \"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3\" (UID: \"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3\") " Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.175140 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-log-httpd\") pod \"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3\" (UID: \"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3\") " Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.175172 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-78pc9\" (UniqueName: \"kubernetes.io/projected/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-kube-api-access-78pc9\") pod \"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3\" (UID: \"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3\") " Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.175359 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-run-httpd\") pod \"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3\" (UID: \"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3\") " Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.175453 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-scripts\") pod \"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3\" (UID: \"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3\") " Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.175532 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-sg-core-conf-yaml\") pod \"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3\" (UID: \"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3\") " Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.176054 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "f8fa0f18-f3b5-4a0d-a709-190d24e34ed3" (UID: "f8fa0f18-f3b5-4a0d-a709-190d24e34ed3"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.176109 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/025b9b80-a0fc-4f59-b25b-d541738c8dfc-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.176391 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "f8fa0f18-f3b5-4a0d-a709-190d24e34ed3" (UID: "f8fa0f18-f3b5-4a0d-a709-190d24e34ed3"). 
InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.198822 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-scripts" (OuterVolumeSpecName: "scripts") pod "f8fa0f18-f3b5-4a0d-a709-190d24e34ed3" (UID: "f8fa0f18-f3b5-4a0d-a709-190d24e34ed3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.207390 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-kube-api-access-78pc9" (OuterVolumeSpecName: "kube-api-access-78pc9") pod "f8fa0f18-f3b5-4a0d-a709-190d24e34ed3" (UID: "f8fa0f18-f3b5-4a0d-a709-190d24e34ed3"). InnerVolumeSpecName "kube-api-access-78pc9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.261693 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "f8fa0f18-f3b5-4a0d-a709-190d24e34ed3" (UID: "f8fa0f18-f3b5-4a0d-a709-190d24e34ed3"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.280049 4701 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.280079 4701 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.280093 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-78pc9\" (UniqueName: \"kubernetes.io/projected/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-kube-api-access-78pc9\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.280103 4701 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.280112 4701 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.376062 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-66cbbc6b59-4jhxd" event={"ID":"567ed826-1db0-4018-b4ea-8af42596aa3e","Type":"ContainerStarted","Data":"01375f34ca09202c30a44fc4dceb682a3465129617b4858416734373227bff5f"} Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.376109 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-66cbbc6b59-4jhxd" event={"ID":"567ed826-1db0-4018-b4ea-8af42596aa3e","Type":"ContainerStarted","Data":"15f4659e43f1e6ef2114bece01d144c54ee626660becdf98b6c8829e0fc9eebc"} Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.382275 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f8fa0f18-f3b5-4a0d-a709-190d24e34ed3" (UID: "f8fa0f18-f3b5-4a0d-a709-190d24e34ed3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.385845 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"025b9b80-a0fc-4f59-b25b-d541738c8dfc","Type":"ContainerDied","Data":"735f9a47d8ebea7bf324344e45152e4cdde55a3f9bab40e2a1b1b17c9395d2cd"} Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.386334 4701 scope.go:117] "RemoveContainer" containerID="3b60c6eea761331e4a9cda0780211b6fe8bd85b1a6f04811e2ec82a1b5b3b868" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.385921 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.387070 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.408030 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f8fa0f18-f3b5-4a0d-a709-190d24e34ed3","Type":"ContainerDied","Data":"657ffd97c0d2b68e57ba2428f9ac092ac53b3d1b69a71c0fb584c8e545192e37"} Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.408296 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.439034 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.442172 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-config-data" (OuterVolumeSpecName: "config-data") pod "f8fa0f18-f3b5-4a0d-a709-190d24e34ed3" (UID: "f8fa0f18-f3b5-4a0d-a709-190d24e34ed3"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.460683 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.470329 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 21 19:21:04 crc kubenswrapper[4701]: E1121 19:21:04.470840 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8fa0f18-f3b5-4a0d-a709-190d24e34ed3" containerName="ceilometer-central-agent" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.470855 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8fa0f18-f3b5-4a0d-a709-190d24e34ed3" containerName="ceilometer-central-agent" Nov 21 19:21:04 crc kubenswrapper[4701]: E1121 19:21:04.470876 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8fa0f18-f3b5-4a0d-a709-190d24e34ed3" containerName="ceilometer-notification-agent" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.470883 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8fa0f18-f3b5-4a0d-a709-190d24e34ed3" containerName="ceilometer-notification-agent" Nov 21 19:21:04 crc kubenswrapper[4701]: E1121 19:21:04.470907 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="025b9b80-a0fc-4f59-b25b-d541738c8dfc" containerName="cinder-api-log" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.470914 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="025b9b80-a0fc-4f59-b25b-d541738c8dfc" containerName="cinder-api-log" Nov 21 19:21:04 crc kubenswrapper[4701]: E1121 19:21:04.470927 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8fa0f18-f3b5-4a0d-a709-190d24e34ed3" containerName="sg-core" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.470933 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8fa0f18-f3b5-4a0d-a709-190d24e34ed3" containerName="sg-core" Nov 21 19:21:04 crc kubenswrapper[4701]: E1121 19:21:04.470949 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8fa0f18-f3b5-4a0d-a709-190d24e34ed3" containerName="proxy-httpd" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.470955 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8fa0f18-f3b5-4a0d-a709-190d24e34ed3" containerName="proxy-httpd" Nov 21 19:21:04 crc kubenswrapper[4701]: E1121 19:21:04.470968 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="025b9b80-a0fc-4f59-b25b-d541738c8dfc" containerName="cinder-api" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.470974 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="025b9b80-a0fc-4f59-b25b-d541738c8dfc" containerName="cinder-api" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.471149 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8fa0f18-f3b5-4a0d-a709-190d24e34ed3" containerName="sg-core" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.471161 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8fa0f18-f3b5-4a0d-a709-190d24e34ed3" containerName="proxy-httpd" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.471174 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8fa0f18-f3b5-4a0d-a709-190d24e34ed3" containerName="ceilometer-central-agent" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.471187 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8fa0f18-f3b5-4a0d-a709-190d24e34ed3" 
containerName="ceilometer-notification-agent" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.471240 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="025b9b80-a0fc-4f59-b25b-d541738c8dfc" containerName="cinder-api-log" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.471252 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="025b9b80-a0fc-4f59-b25b-d541738c8dfc" containerName="cinder-api" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.472909 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.476555 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.476802 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.484024 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.485499 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.489178 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.492221 4701 scope.go:117] "RemoveContainer" containerID="8dca0052c45fdcff2fbb90fa10fa06ea3dc235c51395308f5f61e5425b9c36d8" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.546699 4701 scope.go:117] "RemoveContainer" containerID="5c378b50c25aeb0425b035b32f243b229d7fd8cad7ccf1734406be38c6d7afca" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.591661 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f-public-tls-certs\") pod \"cinder-api-0\" (UID: \"83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f\") " pod="openstack/cinder-api-0" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.591723 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f\") " pod="openstack/cinder-api-0" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.591748 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f-logs\") pod \"cinder-api-0\" (UID: \"83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f\") " pod="openstack/cinder-api-0" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.591778 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjthg\" (UniqueName: \"kubernetes.io/projected/83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f-kube-api-access-zjthg\") pod \"cinder-api-0\" (UID: \"83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f\") " pod="openstack/cinder-api-0" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.591827 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config-data-custom\" (UniqueName: \"kubernetes.io/secret/83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f-config-data-custom\") pod \"cinder-api-0\" (UID: \"83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f\") " pod="openstack/cinder-api-0" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.591848 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f-config-data\") pod \"cinder-api-0\" (UID: \"83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f\") " pod="openstack/cinder-api-0" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.591885 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f-scripts\") pod \"cinder-api-0\" (UID: \"83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f\") " pod="openstack/cinder-api-0" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.591905 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f\") " pod="openstack/cinder-api-0" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.591948 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f-etc-machine-id\") pod \"cinder-api-0\" (UID: \"83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f\") " pod="openstack/cinder-api-0" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.627687 4701 scope.go:117] "RemoveContainer" containerID="f3add8d4bff89e836b2fd97cf25939490c4102f39704e0daad28292181145d83" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.683832 4701 scope.go:117] "RemoveContainer" containerID="474e911f407d22c07404d4e69d719852640288b52eb303b73920620acb6525d8" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.693401 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f\") " pod="openstack/cinder-api-0" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.693444 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f-logs\") pod \"cinder-api-0\" (UID: \"83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f\") " pod="openstack/cinder-api-0" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.693475 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjthg\" (UniqueName: \"kubernetes.io/projected/83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f-kube-api-access-zjthg\") pod \"cinder-api-0\" (UID: \"83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f\") " pod="openstack/cinder-api-0" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.693530 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f-config-data-custom\") pod \"cinder-api-0\" (UID: \"83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f\") " pod="openstack/cinder-api-0" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.693551 
4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f-config-data\") pod \"cinder-api-0\" (UID: \"83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f\") " pod="openstack/cinder-api-0" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.693587 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f-scripts\") pod \"cinder-api-0\" (UID: \"83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f\") " pod="openstack/cinder-api-0" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.693608 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f\") " pod="openstack/cinder-api-0" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.693668 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f-etc-machine-id\") pod \"cinder-api-0\" (UID: \"83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f\") " pod="openstack/cinder-api-0" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.693708 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f-public-tls-certs\") pod \"cinder-api-0\" (UID: \"83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f\") " pod="openstack/cinder-api-0" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.694605 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f-logs\") pod \"cinder-api-0\" (UID: \"83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f\") " pod="openstack/cinder-api-0" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.698257 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f-etc-machine-id\") pod \"cinder-api-0\" (UID: \"83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f\") " pod="openstack/cinder-api-0" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.702437 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f-config-data-custom\") pod \"cinder-api-0\" (UID: \"83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f\") " pod="openstack/cinder-api-0" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.707710 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f-scripts\") pod \"cinder-api-0\" (UID: \"83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f\") " pod="openstack/cinder-api-0" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.708170 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f\") " pod="openstack/cinder-api-0" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.708361 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f\") " pod="openstack/cinder-api-0" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.708535 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f-public-tls-certs\") pod \"cinder-api-0\" (UID: \"83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f\") " pod="openstack/cinder-api-0" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.713648 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f-config-data\") pod \"cinder-api-0\" (UID: \"83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f\") " pod="openstack/cinder-api-0" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.731943 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjthg\" (UniqueName: \"kubernetes.io/projected/83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f-kube-api-access-zjthg\") pod \"cinder-api-0\" (UID: \"83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f\") " pod="openstack/cinder-api-0" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.738999 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-0ce8-account-create-pzhxk"] Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.808047 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.824234 4701 scope.go:117] "RemoveContainer" containerID="06e89e7d0494414848945b951e909773941a341099578ae5549553c376e7b1f5" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.832802 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-g5rt5"] Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.858673 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:21:04 crc kubenswrapper[4701]: W1121 19:21:04.883259 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod924bddf4_8ec7_4d35_b3ec_1bf4ff8b5502.slice/crio-63b03d5f268e0b5914bbbf49199751308722f2b1eee03bc3b1e85b78371cbae5 WatchSource:0}: Error finding container 63b03d5f268e0b5914bbbf49199751308722f2b1eee03bc3b1e85b78371cbae5: Status 404 returned error can't find the container with id 63b03d5f268e0b5914bbbf49199751308722f2b1eee03bc3b1e85b78371cbae5 Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.891303 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.925298 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.927913 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.931490 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.940074 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-xcwpl"] Nov 21 19:21:04 crc kubenswrapper[4701]: I1121 19:21:04.941184 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.008982 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c236db90-f438-443f-8ec3-ac0b599bfbc0-log-httpd\") pod \"ceilometer-0\" (UID: \"c236db90-f438-443f-8ec3-ac0b599bfbc0\") " pod="openstack/ceilometer-0" Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.009109 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c236db90-f438-443f-8ec3-ac0b599bfbc0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c236db90-f438-443f-8ec3-ac0b599bfbc0\") " pod="openstack/ceilometer-0" Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.009233 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c236db90-f438-443f-8ec3-ac0b599bfbc0-run-httpd\") pod \"ceilometer-0\" (UID: \"c236db90-f438-443f-8ec3-ac0b599bfbc0\") " pod="openstack/ceilometer-0" Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.009883 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mg6vq\" (UniqueName: \"kubernetes.io/projected/c236db90-f438-443f-8ec3-ac0b599bfbc0-kube-api-access-mg6vq\") pod \"ceilometer-0\" (UID: \"c236db90-f438-443f-8ec3-ac0b599bfbc0\") " pod="openstack/ceilometer-0" Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.011833 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c236db90-f438-443f-8ec3-ac0b599bfbc0-config-data\") pod \"ceilometer-0\" (UID: \"c236db90-f438-443f-8ec3-ac0b599bfbc0\") " pod="openstack/ceilometer-0" Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.012071 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c236db90-f438-443f-8ec3-ac0b599bfbc0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c236db90-f438-443f-8ec3-ac0b599bfbc0\") " pod="openstack/ceilometer-0" Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.012327 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c236db90-f438-443f-8ec3-ac0b599bfbc0-scripts\") pod \"ceilometer-0\" (UID: \"c236db90-f438-443f-8ec3-ac0b599bfbc0\") " pod="openstack/ceilometer-0" Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.050351 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.059239 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-054e-account-create-4x7l8"] Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.068305 4701 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-d84d-account-create-nx659"] Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.077145 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-xvfz2"] Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.115227 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c236db90-f438-443f-8ec3-ac0b599bfbc0-scripts\") pod \"ceilometer-0\" (UID: \"c236db90-f438-443f-8ec3-ac0b599bfbc0\") " pod="openstack/ceilometer-0" Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.115360 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c236db90-f438-443f-8ec3-ac0b599bfbc0-log-httpd\") pod \"ceilometer-0\" (UID: \"c236db90-f438-443f-8ec3-ac0b599bfbc0\") " pod="openstack/ceilometer-0" Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.115392 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c236db90-f438-443f-8ec3-ac0b599bfbc0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c236db90-f438-443f-8ec3-ac0b599bfbc0\") " pod="openstack/ceilometer-0" Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.115444 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c236db90-f438-443f-8ec3-ac0b599bfbc0-run-httpd\") pod \"ceilometer-0\" (UID: \"c236db90-f438-443f-8ec3-ac0b599bfbc0\") " pod="openstack/ceilometer-0" Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.115476 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mg6vq\" (UniqueName: \"kubernetes.io/projected/c236db90-f438-443f-8ec3-ac0b599bfbc0-kube-api-access-mg6vq\") pod \"ceilometer-0\" (UID: \"c236db90-f438-443f-8ec3-ac0b599bfbc0\") " pod="openstack/ceilometer-0" Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.115535 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c236db90-f438-443f-8ec3-ac0b599bfbc0-config-data\") pod \"ceilometer-0\" (UID: \"c236db90-f438-443f-8ec3-ac0b599bfbc0\") " pod="openstack/ceilometer-0" Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.115597 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c236db90-f438-443f-8ec3-ac0b599bfbc0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c236db90-f438-443f-8ec3-ac0b599bfbc0\") " pod="openstack/ceilometer-0" Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.120558 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c236db90-f438-443f-8ec3-ac0b599bfbc0-run-httpd\") pod \"ceilometer-0\" (UID: \"c236db90-f438-443f-8ec3-ac0b599bfbc0\") " pod="openstack/ceilometer-0" Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.122852 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c236db90-f438-443f-8ec3-ac0b599bfbc0-log-httpd\") pod \"ceilometer-0\" (UID: \"c236db90-f438-443f-8ec3-ac0b599bfbc0\") " pod="openstack/ceilometer-0" Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.127071 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"scripts\" (UniqueName: \"kubernetes.io/secret/c236db90-f438-443f-8ec3-ac0b599bfbc0-scripts\") pod \"ceilometer-0\" (UID: \"c236db90-f438-443f-8ec3-ac0b599bfbc0\") " pod="openstack/ceilometer-0" Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.127767 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c236db90-f438-443f-8ec3-ac0b599bfbc0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c236db90-f438-443f-8ec3-ac0b599bfbc0\") " pod="openstack/ceilometer-0" Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.130061 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c236db90-f438-443f-8ec3-ac0b599bfbc0-config-data\") pod \"ceilometer-0\" (UID: \"c236db90-f438-443f-8ec3-ac0b599bfbc0\") " pod="openstack/ceilometer-0" Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.136837 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mg6vq\" (UniqueName: \"kubernetes.io/projected/c236db90-f438-443f-8ec3-ac0b599bfbc0-kube-api-access-mg6vq\") pod \"ceilometer-0\" (UID: \"c236db90-f438-443f-8ec3-ac0b599bfbc0\") " pod="openstack/ceilometer-0" Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.140091 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c236db90-f438-443f-8ec3-ac0b599bfbc0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c236db90-f438-443f-8ec3-ac0b599bfbc0\") " pod="openstack/ceilometer-0" Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.302436 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.486327 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-054e-account-create-4x7l8" event={"ID":"0eb288c4-5e71-4918-b69c-918bc5fa4bee","Type":"ContainerStarted","Data":"9b3f49cc2daa980d6619256591dc71beede47403a922431232b14142f0a6ab32"} Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.506294 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"3cabfe57-5d37-4a59-93e3-aac4836f7d2c","Type":"ContainerStarted","Data":"1f717baeabad2d60ca160dce41f4463dfff7e478ec95605a1141de87b57517dd"} Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.519354 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-d84d-account-create-nx659" event={"ID":"36850367-a3b5-4f05-9ac6-223e900ab01e","Type":"ContainerStarted","Data":"1659107a34c9eb2dd333ed47e2837c2049ad0c4dd128e95ddccb26600bc55e9c"} Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.534845 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=3.468864569 podStartE2EDuration="19.534829105s" podCreationTimestamp="2025-11-21 19:20:46 +0000 UTC" firstStartedPulling="2025-11-21 19:20:47.602382048 +0000 UTC m=+1138.387522075" lastFinishedPulling="2025-11-21 19:21:03.668346584 +0000 UTC m=+1154.453486611" observedRunningTime="2025-11-21 19:21:05.533964521 +0000 UTC m=+1156.319104548" watchObservedRunningTime="2025-11-21 19:21:05.534829105 +0000 UTC m=+1156.319969132" Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.606551 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-66cbbc6b59-4jhxd" 
event={"ID":"567ed826-1db0-4018-b4ea-8af42596aa3e","Type":"ContainerStarted","Data":"15cda048a1df3541c3e220c9dbd8d0002c5cea24ab9983628b8231838ac459f0"} Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.607426 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-66cbbc6b59-4jhxd" Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.607476 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-66cbbc6b59-4jhxd" Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.640189 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-g5rt5" event={"ID":"924bddf4-8ec7-4d35-b3ec-1bf4ff8b5502","Type":"ContainerStarted","Data":"63b03d5f268e0b5914bbbf49199751308722f2b1eee03bc3b1e85b78371cbae5"} Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.669507 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.669845 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-869574dbc6-l96tx" podUID="1c543587-173c-4fb2-b730-72b848f845d6" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.158:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.158:8443: connect: connection refused" Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.670332 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-xvfz2" event={"ID":"cf31154f-0e27-4b6c-889c-608d4a6aaf41","Type":"ContainerStarted","Data":"5180d1044c6b8be8d6e5438af03cc9f5cb692aa0999749f685b5975f1c95b1cf"} Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.698063 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-869574dbc6-l96tx" Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.698377 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-0ce8-account-create-pzhxk" event={"ID":"b9e5386f-281c-4a6a-bbe4-0b1f15b82869","Type":"ContainerStarted","Data":"fac783339a1ed332dab93147ff38773b5f122da9057a51acb39a36523405e0d6"} Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.698431 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-0ce8-account-create-pzhxk" event={"ID":"b9e5386f-281c-4a6a-bbe4-0b1f15b82869","Type":"ContainerStarted","Data":"784ac7756d65b7c8473804ef82a8e2b9c77bb53d0f44c3fea39bd9caf8a2561d"} Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.701555 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-66cbbc6b59-4jhxd" podStartSLOduration=12.701516653 podStartE2EDuration="12.701516653s" podCreationTimestamp="2025-11-21 19:20:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:21:05.683638115 +0000 UTC m=+1156.468778132" watchObservedRunningTime="2025-11-21 19:21:05.701516653 +0000 UTC m=+1156.486656680" Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.711983 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-xcwpl" event={"ID":"a7b15c44-532c-4df9-aaec-d0fee8594570","Type":"ContainerStarted","Data":"adc515ba00d88247ae6e47a75bfc21ef31090d649e291f36c1e7e602a9ebf321"} Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.784041 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/nova-cell0-0ce8-account-create-pzhxk" podStartSLOduration=4.784013986 podStartE2EDuration="4.784013986s" podCreationTimestamp="2025-11-21 19:21:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:21:05.772069475 +0000 UTC m=+1156.557209502" watchObservedRunningTime="2025-11-21 19:21:05.784013986 +0000 UTC m=+1156.569154013" Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.989867 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="025b9b80-a0fc-4f59-b25b-d541738c8dfc" path="/var/lib/kubelet/pods/025b9b80-a0fc-4f59-b25b-d541738c8dfc/volumes" Nov 21 19:21:05 crc kubenswrapper[4701]: I1121 19:21:05.990974 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f8fa0f18-f3b5-4a0d-a709-190d24e34ed3" path="/var/lib/kubelet/pods/f8fa0f18-f3b5-4a0d-a709-190d24e34ed3/volumes" Nov 21 19:21:06 crc kubenswrapper[4701]: I1121 19:21:06.175614 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:21:06 crc kubenswrapper[4701]: I1121 19:21:06.736191 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-xvfz2" event={"ID":"cf31154f-0e27-4b6c-889c-608d4a6aaf41","Type":"ContainerStarted","Data":"9ea2d67465568d28afaed8a1e9f21d622ec1447f1387f2b485274e2b58b2beb5"} Nov 21 19:21:06 crc kubenswrapper[4701]: I1121 19:21:06.740394 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-d84d-account-create-nx659" event={"ID":"36850367-a3b5-4f05-9ac6-223e900ab01e","Type":"ContainerStarted","Data":"5fd4bdb33bff2bd7058f9f93b26328f9db1b94f668e4eb96a54670221212c936"} Nov 21 19:21:06 crc kubenswrapper[4701]: I1121 19:21:06.743339 4701 generic.go:334] "Generic (PLEG): container finished" podID="b9e5386f-281c-4a6a-bbe4-0b1f15b82869" containerID="fac783339a1ed332dab93147ff38773b5f122da9057a51acb39a36523405e0d6" exitCode=0 Nov 21 19:21:06 crc kubenswrapper[4701]: I1121 19:21:06.743409 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-0ce8-account-create-pzhxk" event={"ID":"b9e5386f-281c-4a6a-bbe4-0b1f15b82869","Type":"ContainerDied","Data":"fac783339a1ed332dab93147ff38773b5f122da9057a51acb39a36523405e0d6"} Nov 21 19:21:06 crc kubenswrapper[4701]: I1121 19:21:06.745819 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-xcwpl" event={"ID":"a7b15c44-532c-4df9-aaec-d0fee8594570","Type":"ContainerStarted","Data":"25e04e39eda9520d9117e128c33bee39b932aae765eb512834b29569dc15a75b"} Nov 21 19:21:06 crc kubenswrapper[4701]: I1121 19:21:06.748374 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c236db90-f438-443f-8ec3-ac0b599bfbc0","Type":"ContainerStarted","Data":"e5371625664039ef003ccd6ecc409aa4ff50f10657fe501bda6fa82bf8e1a4e4"} Nov 21 19:21:06 crc kubenswrapper[4701]: I1121 19:21:06.750689 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f","Type":"ContainerStarted","Data":"f588650d7c2fbec19570ee63d059a95a98ad955faeaf8b1e61604e3a1e499ce4"} Nov 21 19:21:06 crc kubenswrapper[4701]: I1121 19:21:06.760398 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-db-create-xvfz2" podStartSLOduration=5.760349781 podStartE2EDuration="5.760349781s" podCreationTimestamp="2025-11-21 19:21:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 
UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:21:06.756885418 +0000 UTC m=+1157.542025455" watchObservedRunningTime="2025-11-21 19:21:06.760349781 +0000 UTC m=+1157.545489808" Nov 21 19:21:06 crc kubenswrapper[4701]: I1121 19:21:06.762854 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-054e-account-create-4x7l8" event={"ID":"0eb288c4-5e71-4918-b69c-918bc5fa4bee","Type":"ContainerStarted","Data":"bfbae9e25027c5ee66380d2e91a70b9cc9d5c7e646c57b632dba2d73296e9ccf"} Nov 21 19:21:06 crc kubenswrapper[4701]: I1121 19:21:06.765390 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-g5rt5" event={"ID":"924bddf4-8ec7-4d35-b3ec-1bf4ff8b5502","Type":"ContainerStarted","Data":"9cc0d069603ccce34b7334203837f5656e0905cc0da53d6ab6a2a36b91c78bbf"} Nov 21 19:21:06 crc kubenswrapper[4701]: I1121 19:21:06.803822 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-d84d-account-create-nx659" podStartSLOduration=5.803795496 podStartE2EDuration="5.803795496s" podCreationTimestamp="2025-11-21 19:21:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:21:06.77152693 +0000 UTC m=+1157.556666957" watchObservedRunningTime="2025-11-21 19:21:06.803795496 +0000 UTC m=+1157.588935523" Nov 21 19:21:06 crc kubenswrapper[4701]: I1121 19:21:06.906411 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-db-create-xcwpl" podStartSLOduration=5.906382166 podStartE2EDuration="5.906382166s" podCreationTimestamp="2025-11-21 19:21:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:21:06.820672708 +0000 UTC m=+1157.605812735" watchObservedRunningTime="2025-11-21 19:21:06.906382166 +0000 UTC m=+1157.691522203" Nov 21 19:21:06 crc kubenswrapper[4701]: I1121 19:21:06.910848 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-054e-account-create-4x7l8" podStartSLOduration=5.910831025 podStartE2EDuration="5.910831025s" podCreationTimestamp="2025-11-21 19:21:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:21:06.844031514 +0000 UTC m=+1157.629171561" watchObservedRunningTime="2025-11-21 19:21:06.910831025 +0000 UTC m=+1157.695971052" Nov 21 19:21:07 crc kubenswrapper[4701]: I1121 19:21:07.788468 4701 generic.go:334] "Generic (PLEG): container finished" podID="924bddf4-8ec7-4d35-b3ec-1bf4ff8b5502" containerID="9cc0d069603ccce34b7334203837f5656e0905cc0da53d6ab6a2a36b91c78bbf" exitCode=0 Nov 21 19:21:07 crc kubenswrapper[4701]: I1121 19:21:07.789405 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-g5rt5" event={"ID":"924bddf4-8ec7-4d35-b3ec-1bf4ff8b5502","Type":"ContainerDied","Data":"9cc0d069603ccce34b7334203837f5656e0905cc0da53d6ab6a2a36b91c78bbf"} Nov 21 19:21:07 crc kubenswrapper[4701]: I1121 19:21:07.797102 4701 generic.go:334] "Generic (PLEG): container finished" podID="cf31154f-0e27-4b6c-889c-608d4a6aaf41" containerID="9ea2d67465568d28afaed8a1e9f21d622ec1447f1387f2b485274e2b58b2beb5" exitCode=0 Nov 21 19:21:07 crc kubenswrapper[4701]: I1121 19:21:07.798042 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-api-db-create-xvfz2" event={"ID":"cf31154f-0e27-4b6c-889c-608d4a6aaf41","Type":"ContainerDied","Data":"9ea2d67465568d28afaed8a1e9f21d622ec1447f1387f2b485274e2b58b2beb5"} Nov 21 19:21:07 crc kubenswrapper[4701]: I1121 19:21:07.803669 4701 generic.go:334] "Generic (PLEG): container finished" podID="36850367-a3b5-4f05-9ac6-223e900ab01e" containerID="5fd4bdb33bff2bd7058f9f93b26328f9db1b94f668e4eb96a54670221212c936" exitCode=0 Nov 21 19:21:07 crc kubenswrapper[4701]: I1121 19:21:07.803736 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-d84d-account-create-nx659" event={"ID":"36850367-a3b5-4f05-9ac6-223e900ab01e","Type":"ContainerDied","Data":"5fd4bdb33bff2bd7058f9f93b26328f9db1b94f668e4eb96a54670221212c936"} Nov 21 19:21:07 crc kubenswrapper[4701]: I1121 19:21:07.805103 4701 generic.go:334] "Generic (PLEG): container finished" podID="a7b15c44-532c-4df9-aaec-d0fee8594570" containerID="25e04e39eda9520d9117e128c33bee39b932aae765eb512834b29569dc15a75b" exitCode=0 Nov 21 19:21:07 crc kubenswrapper[4701]: I1121 19:21:07.805232 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-xcwpl" event={"ID":"a7b15c44-532c-4df9-aaec-d0fee8594570","Type":"ContainerDied","Data":"25e04e39eda9520d9117e128c33bee39b932aae765eb512834b29569dc15a75b"} Nov 21 19:21:07 crc kubenswrapper[4701]: I1121 19:21:07.817829 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f","Type":"ContainerStarted","Data":"f5539334ab9736bbed6685f701e56cfc27be15fa5776df9e5fa0c0a05edc61b8"} Nov 21 19:21:07 crc kubenswrapper[4701]: I1121 19:21:07.820001 4701 generic.go:334] "Generic (PLEG): container finished" podID="0eb288c4-5e71-4918-b69c-918bc5fa4bee" containerID="bfbae9e25027c5ee66380d2e91a70b9cc9d5c7e646c57b632dba2d73296e9ccf" exitCode=0 Nov 21 19:21:07 crc kubenswrapper[4701]: I1121 19:21:07.820240 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-054e-account-create-4x7l8" event={"ID":"0eb288c4-5e71-4918-b69c-918bc5fa4bee","Type":"ContainerDied","Data":"bfbae9e25027c5ee66380d2e91a70b9cc9d5c7e646c57b632dba2d73296e9ccf"} Nov 21 19:21:08 crc kubenswrapper[4701]: I1121 19:21:08.226584 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-g5rt5" Nov 21 19:21:08 crc kubenswrapper[4701]: I1121 19:21:08.329632 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9ptnt\" (UniqueName: \"kubernetes.io/projected/924bddf4-8ec7-4d35-b3ec-1bf4ff8b5502-kube-api-access-9ptnt\") pod \"924bddf4-8ec7-4d35-b3ec-1bf4ff8b5502\" (UID: \"924bddf4-8ec7-4d35-b3ec-1bf4ff8b5502\") " Nov 21 19:21:08 crc kubenswrapper[4701]: I1121 19:21:08.329930 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/924bddf4-8ec7-4d35-b3ec-1bf4ff8b5502-operator-scripts\") pod \"924bddf4-8ec7-4d35-b3ec-1bf4ff8b5502\" (UID: \"924bddf4-8ec7-4d35-b3ec-1bf4ff8b5502\") " Nov 21 19:21:08 crc kubenswrapper[4701]: I1121 19:21:08.330553 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/924bddf4-8ec7-4d35-b3ec-1bf4ff8b5502-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "924bddf4-8ec7-4d35-b3ec-1bf4ff8b5502" (UID: "924bddf4-8ec7-4d35-b3ec-1bf4ff8b5502"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:21:08 crc kubenswrapper[4701]: I1121 19:21:08.331033 4701 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/924bddf4-8ec7-4d35-b3ec-1bf4ff8b5502-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:08 crc kubenswrapper[4701]: I1121 19:21:08.336045 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/924bddf4-8ec7-4d35-b3ec-1bf4ff8b5502-kube-api-access-9ptnt" (OuterVolumeSpecName: "kube-api-access-9ptnt") pod "924bddf4-8ec7-4d35-b3ec-1bf4ff8b5502" (UID: "924bddf4-8ec7-4d35-b3ec-1bf4ff8b5502"). InnerVolumeSpecName "kube-api-access-9ptnt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:21:08 crc kubenswrapper[4701]: I1121 19:21:08.338225 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-0ce8-account-create-pzhxk" Nov 21 19:21:08 crc kubenswrapper[4701]: I1121 19:21:08.432181 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h52gt\" (UniqueName: \"kubernetes.io/projected/b9e5386f-281c-4a6a-bbe4-0b1f15b82869-kube-api-access-h52gt\") pod \"b9e5386f-281c-4a6a-bbe4-0b1f15b82869\" (UID: \"b9e5386f-281c-4a6a-bbe4-0b1f15b82869\") " Nov 21 19:21:08 crc kubenswrapper[4701]: I1121 19:21:08.432901 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b9e5386f-281c-4a6a-bbe4-0b1f15b82869-operator-scripts\") pod \"b9e5386f-281c-4a6a-bbe4-0b1f15b82869\" (UID: \"b9e5386f-281c-4a6a-bbe4-0b1f15b82869\") " Nov 21 19:21:08 crc kubenswrapper[4701]: I1121 19:21:08.433445 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9ptnt\" (UniqueName: \"kubernetes.io/projected/924bddf4-8ec7-4d35-b3ec-1bf4ff8b5502-kube-api-access-9ptnt\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:08 crc kubenswrapper[4701]: I1121 19:21:08.433937 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b9e5386f-281c-4a6a-bbe4-0b1f15b82869-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b9e5386f-281c-4a6a-bbe4-0b1f15b82869" (UID: "b9e5386f-281c-4a6a-bbe4-0b1f15b82869"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:21:08 crc kubenswrapper[4701]: I1121 19:21:08.437503 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9e5386f-281c-4a6a-bbe4-0b1f15b82869-kube-api-access-h52gt" (OuterVolumeSpecName: "kube-api-access-h52gt") pod "b9e5386f-281c-4a6a-bbe4-0b1f15b82869" (UID: "b9e5386f-281c-4a6a-bbe4-0b1f15b82869"). InnerVolumeSpecName "kube-api-access-h52gt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:21:08 crc kubenswrapper[4701]: I1121 19:21:08.535672 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h52gt\" (UniqueName: \"kubernetes.io/projected/b9e5386f-281c-4a6a-bbe4-0b1f15b82869-kube-api-access-h52gt\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:08 crc kubenswrapper[4701]: I1121 19:21:08.535714 4701 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b9e5386f-281c-4a6a-bbe4-0b1f15b82869-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:08 crc kubenswrapper[4701]: I1121 19:21:08.834731 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-0ce8-account-create-pzhxk" Nov 21 19:21:08 crc kubenswrapper[4701]: I1121 19:21:08.834768 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-0ce8-account-create-pzhxk" event={"ID":"b9e5386f-281c-4a6a-bbe4-0b1f15b82869","Type":"ContainerDied","Data":"784ac7756d65b7c8473804ef82a8e2b9c77bb53d0f44c3fea39bd9caf8a2561d"} Nov 21 19:21:08 crc kubenswrapper[4701]: I1121 19:21:08.835996 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="784ac7756d65b7c8473804ef82a8e2b9c77bb53d0f44c3fea39bd9caf8a2561d" Nov 21 19:21:08 crc kubenswrapper[4701]: I1121 19:21:08.838434 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c236db90-f438-443f-8ec3-ac0b599bfbc0","Type":"ContainerStarted","Data":"914382995af859f4d91baa0f9ed81e8112bebee52cbe07753bafe722e690abd5"} Nov 21 19:21:08 crc kubenswrapper[4701]: I1121 19:21:08.838487 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c236db90-f438-443f-8ec3-ac0b599bfbc0","Type":"ContainerStarted","Data":"266ee3d4178a9dfb5192a29b74f111ee210b6c26276757101f9d5147077ce212"} Nov 21 19:21:08 crc kubenswrapper[4701]: I1121 19:21:08.841482 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f","Type":"ContainerStarted","Data":"dc7d1401393e8db77cb988c584d493abd46cb71cef5449955691b76bea2a94c6"} Nov 21 19:21:08 crc kubenswrapper[4701]: I1121 19:21:08.842781 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 21 19:21:08 crc kubenswrapper[4701]: I1121 19:21:08.844706 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-g5rt5" event={"ID":"924bddf4-8ec7-4d35-b3ec-1bf4ff8b5502","Type":"ContainerDied","Data":"63b03d5f268e0b5914bbbf49199751308722f2b1eee03bc3b1e85b78371cbae5"} Nov 21 19:21:08 crc kubenswrapper[4701]: I1121 19:21:08.844769 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="63b03d5f268e0b5914bbbf49199751308722f2b1eee03bc3b1e85b78371cbae5" Nov 21 19:21:08 crc kubenswrapper[4701]: I1121 19:21:08.844955 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-g5rt5" Nov 21 19:21:08 crc kubenswrapper[4701]: I1121 19:21:08.911879 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.911847302 podStartE2EDuration="4.911847302s" podCreationTimestamp="2025-11-21 19:21:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:21:08.873784862 +0000 UTC m=+1159.658924889" watchObservedRunningTime="2025-11-21 19:21:08.911847302 +0000 UTC m=+1159.696987329" Nov 21 19:21:08 crc kubenswrapper[4701]: I1121 19:21:08.952471 4701 scope.go:117] "RemoveContainer" containerID="a0ec6d1a2fc828c1e5eac769e653fa0e9805be850a5d554ea13660925e01ccf6" Nov 21 19:21:08 crc kubenswrapper[4701]: E1121 19:21:08.952866 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 20s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(8a7a5be4-96a4-4574-9839-2d0576595305)\"" pod="openstack/watcher-decision-engine-0" podUID="8a7a5be4-96a4-4574-9839-2d0576595305" Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.446903 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-xvfz2" Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.463034 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-xcwpl" Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.564556 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ldcpg\" (UniqueName: \"kubernetes.io/projected/a7b15c44-532c-4df9-aaec-d0fee8594570-kube-api-access-ldcpg\") pod \"a7b15c44-532c-4df9-aaec-d0fee8594570\" (UID: \"a7b15c44-532c-4df9-aaec-d0fee8594570\") " Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.565294 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a7b15c44-532c-4df9-aaec-d0fee8594570-operator-scripts\") pod \"a7b15c44-532c-4df9-aaec-d0fee8594570\" (UID: \"a7b15c44-532c-4df9-aaec-d0fee8594570\") " Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.565629 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wwnbx\" (UniqueName: \"kubernetes.io/projected/cf31154f-0e27-4b6c-889c-608d4a6aaf41-kube-api-access-wwnbx\") pod \"cf31154f-0e27-4b6c-889c-608d4a6aaf41\" (UID: \"cf31154f-0e27-4b6c-889c-608d4a6aaf41\") " Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.565766 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cf31154f-0e27-4b6c-889c-608d4a6aaf41-operator-scripts\") pod \"cf31154f-0e27-4b6c-889c-608d4a6aaf41\" (UID: \"cf31154f-0e27-4b6c-889c-608d4a6aaf41\") " Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.567700 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a7b15c44-532c-4df9-aaec-d0fee8594570-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a7b15c44-532c-4df9-aaec-d0fee8594570" (UID: "a7b15c44-532c-4df9-aaec-d0fee8594570"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.567874 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cf31154f-0e27-4b6c-889c-608d4a6aaf41-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "cf31154f-0e27-4b6c-889c-608d4a6aaf41" (UID: "cf31154f-0e27-4b6c-889c-608d4a6aaf41"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.575093 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf31154f-0e27-4b6c-889c-608d4a6aaf41-kube-api-access-wwnbx" (OuterVolumeSpecName: "kube-api-access-wwnbx") pod "cf31154f-0e27-4b6c-889c-608d4a6aaf41" (UID: "cf31154f-0e27-4b6c-889c-608d4a6aaf41"). InnerVolumeSpecName "kube-api-access-wwnbx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.578424 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7b15c44-532c-4df9-aaec-d0fee8594570-kube-api-access-ldcpg" (OuterVolumeSpecName: "kube-api-access-ldcpg") pod "a7b15c44-532c-4df9-aaec-d0fee8594570" (UID: "a7b15c44-532c-4df9-aaec-d0fee8594570"). InnerVolumeSpecName "kube-api-access-ldcpg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.648545 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-d84d-account-create-nx659" Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.650591 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-054e-account-create-4x7l8" Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.669334 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ldcpg\" (UniqueName: \"kubernetes.io/projected/a7b15c44-532c-4df9-aaec-d0fee8594570-kube-api-access-ldcpg\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.669457 4701 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a7b15c44-532c-4df9-aaec-d0fee8594570-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.669514 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wwnbx\" (UniqueName: \"kubernetes.io/projected/cf31154f-0e27-4b6c-889c-608d4a6aaf41-kube-api-access-wwnbx\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.669569 4701 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cf31154f-0e27-4b6c-889c-608d4a6aaf41-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.770421 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hsjvk\" (UniqueName: \"kubernetes.io/projected/0eb288c4-5e71-4918-b69c-918bc5fa4bee-kube-api-access-hsjvk\") pod \"0eb288c4-5e71-4918-b69c-918bc5fa4bee\" (UID: \"0eb288c4-5e71-4918-b69c-918bc5fa4bee\") " Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.770592 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nrj95\" (UniqueName: 
\"kubernetes.io/projected/36850367-a3b5-4f05-9ac6-223e900ab01e-kube-api-access-nrj95\") pod \"36850367-a3b5-4f05-9ac6-223e900ab01e\" (UID: \"36850367-a3b5-4f05-9ac6-223e900ab01e\") " Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.770711 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0eb288c4-5e71-4918-b69c-918bc5fa4bee-operator-scripts\") pod \"0eb288c4-5e71-4918-b69c-918bc5fa4bee\" (UID: \"0eb288c4-5e71-4918-b69c-918bc5fa4bee\") " Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.770788 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/36850367-a3b5-4f05-9ac6-223e900ab01e-operator-scripts\") pod \"36850367-a3b5-4f05-9ac6-223e900ab01e\" (UID: \"36850367-a3b5-4f05-9ac6-223e900ab01e\") " Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.771395 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0eb288c4-5e71-4918-b69c-918bc5fa4bee-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0eb288c4-5e71-4918-b69c-918bc5fa4bee" (UID: "0eb288c4-5e71-4918-b69c-918bc5fa4bee"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.771634 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36850367-a3b5-4f05-9ac6-223e900ab01e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "36850367-a3b5-4f05-9ac6-223e900ab01e" (UID: "36850367-a3b5-4f05-9ac6-223e900ab01e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.774794 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0eb288c4-5e71-4918-b69c-918bc5fa4bee-kube-api-access-hsjvk" (OuterVolumeSpecName: "kube-api-access-hsjvk") pod "0eb288c4-5e71-4918-b69c-918bc5fa4bee" (UID: "0eb288c4-5e71-4918-b69c-918bc5fa4bee"). InnerVolumeSpecName "kube-api-access-hsjvk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.774847 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36850367-a3b5-4f05-9ac6-223e900ab01e-kube-api-access-nrj95" (OuterVolumeSpecName: "kube-api-access-nrj95") pod "36850367-a3b5-4f05-9ac6-223e900ab01e" (UID: "36850367-a3b5-4f05-9ac6-223e900ab01e"). InnerVolumeSpecName "kube-api-access-nrj95". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.873972 4701 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/36850367-a3b5-4f05-9ac6-223e900ab01e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.874012 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hsjvk\" (UniqueName: \"kubernetes.io/projected/0eb288c4-5e71-4918-b69c-918bc5fa4bee-kube-api-access-hsjvk\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.874027 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nrj95\" (UniqueName: \"kubernetes.io/projected/36850367-a3b5-4f05-9ac6-223e900ab01e-kube-api-access-nrj95\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.874039 4701 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0eb288c4-5e71-4918-b69c-918bc5fa4bee-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.877010 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-xvfz2" Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.876992 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-xvfz2" event={"ID":"cf31154f-0e27-4b6c-889c-608d4a6aaf41","Type":"ContainerDied","Data":"5180d1044c6b8be8d6e5438af03cc9f5cb692aa0999749f685b5975f1c95b1cf"} Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.877829 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5180d1044c6b8be8d6e5438af03cc9f5cb692aa0999749f685b5975f1c95b1cf" Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.883946 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-d84d-account-create-nx659" event={"ID":"36850367-a3b5-4f05-9ac6-223e900ab01e","Type":"ContainerDied","Data":"1659107a34c9eb2dd333ed47e2837c2049ad0c4dd128e95ddccb26600bc55e9c"} Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.883996 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1659107a34c9eb2dd333ed47e2837c2049ad0c4dd128e95ddccb26600bc55e9c" Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.884083 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-d84d-account-create-nx659" Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.896326 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-xcwpl" event={"ID":"a7b15c44-532c-4df9-aaec-d0fee8594570","Type":"ContainerDied","Data":"adc515ba00d88247ae6e47a75bfc21ef31090d649e291f36c1e7e602a9ebf321"} Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.896361 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-xcwpl" Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.896413 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="adc515ba00d88247ae6e47a75bfc21ef31090d649e291f36c1e7e602a9ebf321" Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.900411 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c236db90-f438-443f-8ec3-ac0b599bfbc0","Type":"ContainerStarted","Data":"904528a00e48383a7e417835fc2a58fb3cdcb1da2ad118ff7917e840cf940403"} Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.915454 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-054e-account-create-4x7l8" Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.915895 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-054e-account-create-4x7l8" event={"ID":"0eb288c4-5e71-4918-b69c-918bc5fa4bee","Type":"ContainerDied","Data":"9b3f49cc2daa980d6619256591dc71beede47403a922431232b14142f0a6ab32"} Nov 21 19:21:09 crc kubenswrapper[4701]: I1121 19:21:09.915998 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9b3f49cc2daa980d6619256591dc71beede47403a922431232b14142f0a6ab32" Nov 21 19:21:10 crc kubenswrapper[4701]: I1121 19:21:10.971231 4701 generic.go:334] "Generic (PLEG): container finished" podID="1c543587-173c-4fb2-b730-72b848f845d6" containerID="430f8778df60ba3d059bc9aa9fa12d81c20d41994db5d5fd007530b6d67dbe5f" exitCode=137 Nov 21 19:21:10 crc kubenswrapper[4701]: I1121 19:21:10.971404 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-869574dbc6-l96tx" event={"ID":"1c543587-173c-4fb2-b730-72b848f845d6","Type":"ContainerDied","Data":"430f8778df60ba3d059bc9aa9fa12d81c20d41994db5d5fd007530b6d67dbe5f"} Nov 21 19:21:10 crc kubenswrapper[4701]: I1121 19:21:10.971939 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-869574dbc6-l96tx" event={"ID":"1c543587-173c-4fb2-b730-72b848f845d6","Type":"ContainerDied","Data":"32e52f609acf5e70d5c372dfb6d4b8fec3c2f408b85ac21539daa39172bd37a5"} Nov 21 19:21:10 crc kubenswrapper[4701]: I1121 19:21:10.971956 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="32e52f609acf5e70d5c372dfb6d4b8fec3c2f408b85ac21539daa39172bd37a5" Nov 21 19:21:10 crc kubenswrapper[4701]: I1121 19:21:10.980869 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-869574dbc6-l96tx" Nov 21 19:21:11 crc kubenswrapper[4701]: I1121 19:21:11.113773 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/1c543587-173c-4fb2-b730-72b848f845d6-horizon-secret-key\") pod \"1c543587-173c-4fb2-b730-72b848f845d6\" (UID: \"1c543587-173c-4fb2-b730-72b848f845d6\") " Nov 21 19:21:11 crc kubenswrapper[4701]: I1121 19:21:11.113878 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c543587-173c-4fb2-b730-72b848f845d6-combined-ca-bundle\") pod \"1c543587-173c-4fb2-b730-72b848f845d6\" (UID: \"1c543587-173c-4fb2-b730-72b848f845d6\") " Nov 21 19:21:11 crc kubenswrapper[4701]: I1121 19:21:11.113942 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rn697\" (UniqueName: \"kubernetes.io/projected/1c543587-173c-4fb2-b730-72b848f845d6-kube-api-access-rn697\") pod \"1c543587-173c-4fb2-b730-72b848f845d6\" (UID: \"1c543587-173c-4fb2-b730-72b848f845d6\") " Nov 21 19:21:11 crc kubenswrapper[4701]: I1121 19:21:11.114020 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1c543587-173c-4fb2-b730-72b848f845d6-logs\") pod \"1c543587-173c-4fb2-b730-72b848f845d6\" (UID: \"1c543587-173c-4fb2-b730-72b848f845d6\") " Nov 21 19:21:11 crc kubenswrapper[4701]: I1121 19:21:11.114064 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c543587-173c-4fb2-b730-72b848f845d6-horizon-tls-certs\") pod \"1c543587-173c-4fb2-b730-72b848f845d6\" (UID: \"1c543587-173c-4fb2-b730-72b848f845d6\") " Nov 21 19:21:11 crc kubenswrapper[4701]: I1121 19:21:11.114137 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1c543587-173c-4fb2-b730-72b848f845d6-config-data\") pod \"1c543587-173c-4fb2-b730-72b848f845d6\" (UID: \"1c543587-173c-4fb2-b730-72b848f845d6\") " Nov 21 19:21:11 crc kubenswrapper[4701]: I1121 19:21:11.114185 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1c543587-173c-4fb2-b730-72b848f845d6-scripts\") pod \"1c543587-173c-4fb2-b730-72b848f845d6\" (UID: \"1c543587-173c-4fb2-b730-72b848f845d6\") " Nov 21 19:21:11 crc kubenswrapper[4701]: I1121 19:21:11.115620 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1c543587-173c-4fb2-b730-72b848f845d6-logs" (OuterVolumeSpecName: "logs") pod "1c543587-173c-4fb2-b730-72b848f845d6" (UID: "1c543587-173c-4fb2-b730-72b848f845d6"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:21:11 crc kubenswrapper[4701]: I1121 19:21:11.123675 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c543587-173c-4fb2-b730-72b848f845d6-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "1c543587-173c-4fb2-b730-72b848f845d6" (UID: "1c543587-173c-4fb2-b730-72b848f845d6"). InnerVolumeSpecName "horizon-secret-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:21:11 crc kubenswrapper[4701]: I1121 19:21:11.125450 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c543587-173c-4fb2-b730-72b848f845d6-kube-api-access-rn697" (OuterVolumeSpecName: "kube-api-access-rn697") pod "1c543587-173c-4fb2-b730-72b848f845d6" (UID: "1c543587-173c-4fb2-b730-72b848f845d6"). InnerVolumeSpecName "kube-api-access-rn697". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:21:11 crc kubenswrapper[4701]: I1121 19:21:11.143788 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c543587-173c-4fb2-b730-72b848f845d6-config-data" (OuterVolumeSpecName: "config-data") pod "1c543587-173c-4fb2-b730-72b848f845d6" (UID: "1c543587-173c-4fb2-b730-72b848f845d6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:21:11 crc kubenswrapper[4701]: I1121 19:21:11.151501 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c543587-173c-4fb2-b730-72b848f845d6-scripts" (OuterVolumeSpecName: "scripts") pod "1c543587-173c-4fb2-b730-72b848f845d6" (UID: "1c543587-173c-4fb2-b730-72b848f845d6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:21:11 crc kubenswrapper[4701]: I1121 19:21:11.157556 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c543587-173c-4fb2-b730-72b848f845d6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1c543587-173c-4fb2-b730-72b848f845d6" (UID: "1c543587-173c-4fb2-b730-72b848f845d6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:21:11 crc kubenswrapper[4701]: I1121 19:21:11.196447 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c543587-173c-4fb2-b730-72b848f845d6-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "1c543587-173c-4fb2-b730-72b848f845d6" (UID: "1c543587-173c-4fb2-b730-72b848f845d6"). InnerVolumeSpecName "horizon-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:21:11 crc kubenswrapper[4701]: I1121 19:21:11.217012 4701 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/1c543587-173c-4fb2-b730-72b848f845d6-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:11 crc kubenswrapper[4701]: I1121 19:21:11.217053 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c543587-173c-4fb2-b730-72b848f845d6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:11 crc kubenswrapper[4701]: I1121 19:21:11.217066 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rn697\" (UniqueName: \"kubernetes.io/projected/1c543587-173c-4fb2-b730-72b848f845d6-kube-api-access-rn697\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:11 crc kubenswrapper[4701]: I1121 19:21:11.217081 4701 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1c543587-173c-4fb2-b730-72b848f845d6-logs\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:11 crc kubenswrapper[4701]: I1121 19:21:11.217092 4701 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c543587-173c-4fb2-b730-72b848f845d6-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:11 crc kubenswrapper[4701]: I1121 19:21:11.217101 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1c543587-173c-4fb2-b730-72b848f845d6-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:11 crc kubenswrapper[4701]: I1121 19:21:11.217111 4701 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1c543587-173c-4fb2-b730-72b848f845d6-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:11 crc kubenswrapper[4701]: I1121 19:21:11.990004 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-869574dbc6-l96tx" Nov 21 19:21:11 crc kubenswrapper[4701]: I1121 19:21:11.990149 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c236db90-f438-443f-8ec3-ac0b599bfbc0","Type":"ContainerStarted","Data":"e67912a9dc67c88144e93a3f31bf9a54e2fe78d03e69a16bbda6039fd4eb5c1f"} Nov 21 19:21:11 crc kubenswrapper[4701]: I1121 19:21:11.991150 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 21 19:21:12 crc kubenswrapper[4701]: I1121 19:21:12.021376 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.377965649 podStartE2EDuration="8.021351147s" podCreationTimestamp="2025-11-21 19:21:04 +0000 UTC" firstStartedPulling="2025-11-21 19:21:06.198924179 +0000 UTC m=+1156.984064206" lastFinishedPulling="2025-11-21 19:21:10.842309657 +0000 UTC m=+1161.627449704" observedRunningTime="2025-11-21 19:21:12.015515401 +0000 UTC m=+1162.800655428" watchObservedRunningTime="2025-11-21 19:21:12.021351147 +0000 UTC m=+1162.806491164" Nov 21 19:21:12 crc kubenswrapper[4701]: I1121 19:21:12.055527 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-869574dbc6-l96tx"] Nov 21 19:21:12 crc kubenswrapper[4701]: I1121 19:21:12.075341 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-869574dbc6-l96tx"] Nov 21 19:21:12 crc kubenswrapper[4701]: I1121 19:21:12.085237 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-wtbqn"] Nov 21 19:21:12 crc kubenswrapper[4701]: E1121 19:21:12.085632 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7b15c44-532c-4df9-aaec-d0fee8594570" containerName="mariadb-database-create" Nov 21 19:21:12 crc kubenswrapper[4701]: I1121 19:21:12.085649 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7b15c44-532c-4df9-aaec-d0fee8594570" containerName="mariadb-database-create" Nov 21 19:21:12 crc kubenswrapper[4701]: E1121 19:21:12.085662 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36850367-a3b5-4f05-9ac6-223e900ab01e" containerName="mariadb-account-create" Nov 21 19:21:12 crc kubenswrapper[4701]: I1121 19:21:12.085668 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="36850367-a3b5-4f05-9ac6-223e900ab01e" containerName="mariadb-account-create" Nov 21 19:21:12 crc kubenswrapper[4701]: E1121 19:21:12.085688 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="924bddf4-8ec7-4d35-b3ec-1bf4ff8b5502" containerName="mariadb-database-create" Nov 21 19:21:12 crc kubenswrapper[4701]: I1121 19:21:12.085694 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="924bddf4-8ec7-4d35-b3ec-1bf4ff8b5502" containerName="mariadb-database-create" Nov 21 19:21:12 crc kubenswrapper[4701]: E1121 19:21:12.085707 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf31154f-0e27-4b6c-889c-608d4a6aaf41" containerName="mariadb-database-create" Nov 21 19:21:12 crc kubenswrapper[4701]: I1121 19:21:12.085713 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf31154f-0e27-4b6c-889c-608d4a6aaf41" containerName="mariadb-database-create" Nov 21 19:21:12 crc kubenswrapper[4701]: E1121 19:21:12.085726 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0eb288c4-5e71-4918-b69c-918bc5fa4bee" containerName="mariadb-account-create" Nov 21 19:21:12 crc kubenswrapper[4701]: I1121 19:21:12.085732 4701 
state_mem.go:107] "Deleted CPUSet assignment" podUID="0eb288c4-5e71-4918-b69c-918bc5fa4bee" containerName="mariadb-account-create" Nov 21 19:21:12 crc kubenswrapper[4701]: E1121 19:21:12.085746 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c543587-173c-4fb2-b730-72b848f845d6" containerName="horizon" Nov 21 19:21:12 crc kubenswrapper[4701]: I1121 19:21:12.085753 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c543587-173c-4fb2-b730-72b848f845d6" containerName="horizon" Nov 21 19:21:12 crc kubenswrapper[4701]: E1121 19:21:12.085769 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c543587-173c-4fb2-b730-72b848f845d6" containerName="horizon-log" Nov 21 19:21:12 crc kubenswrapper[4701]: I1121 19:21:12.085775 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c543587-173c-4fb2-b730-72b848f845d6" containerName="horizon-log" Nov 21 19:21:12 crc kubenswrapper[4701]: E1121 19:21:12.085786 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9e5386f-281c-4a6a-bbe4-0b1f15b82869" containerName="mariadb-account-create" Nov 21 19:21:12 crc kubenswrapper[4701]: I1121 19:21:12.085792 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9e5386f-281c-4a6a-bbe4-0b1f15b82869" containerName="mariadb-account-create" Nov 21 19:21:12 crc kubenswrapper[4701]: I1121 19:21:12.085959 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf31154f-0e27-4b6c-889c-608d4a6aaf41" containerName="mariadb-database-create" Nov 21 19:21:12 crc kubenswrapper[4701]: I1121 19:21:12.086015 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c543587-173c-4fb2-b730-72b848f845d6" containerName="horizon-log" Nov 21 19:21:12 crc kubenswrapper[4701]: I1121 19:21:12.086031 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="924bddf4-8ec7-4d35-b3ec-1bf4ff8b5502" containerName="mariadb-database-create" Nov 21 19:21:12 crc kubenswrapper[4701]: I1121 19:21:12.086041 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7b15c44-532c-4df9-aaec-d0fee8594570" containerName="mariadb-database-create" Nov 21 19:21:12 crc kubenswrapper[4701]: I1121 19:21:12.086052 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c543587-173c-4fb2-b730-72b848f845d6" containerName="horizon" Nov 21 19:21:12 crc kubenswrapper[4701]: I1121 19:21:12.086066 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="36850367-a3b5-4f05-9ac6-223e900ab01e" containerName="mariadb-account-create" Nov 21 19:21:12 crc kubenswrapper[4701]: I1121 19:21:12.086076 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9e5386f-281c-4a6a-bbe4-0b1f15b82869" containerName="mariadb-account-create" Nov 21 19:21:12 crc kubenswrapper[4701]: I1121 19:21:12.086091 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="0eb288c4-5e71-4918-b69c-918bc5fa4bee" containerName="mariadb-account-create" Nov 21 19:21:12 crc kubenswrapper[4701]: I1121 19:21:12.086790 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-wtbqn" Nov 21 19:21:12 crc kubenswrapper[4701]: I1121 19:21:12.095069 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-wtbqn"] Nov 21 19:21:12 crc kubenswrapper[4701]: I1121 19:21:12.101665 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-mzccp" Nov 21 19:21:12 crc kubenswrapper[4701]: I1121 19:21:12.101853 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 21 19:21:12 crc kubenswrapper[4701]: I1121 19:21:12.101978 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 21 19:21:12 crc kubenswrapper[4701]: I1121 19:21:12.139624 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/63425ae3-f835-4767-b6dd-f5519aa2b0ae-scripts\") pod \"nova-cell0-conductor-db-sync-wtbqn\" (UID: \"63425ae3-f835-4767-b6dd-f5519aa2b0ae\") " pod="openstack/nova-cell0-conductor-db-sync-wtbqn" Nov 21 19:21:12 crc kubenswrapper[4701]: I1121 19:21:12.139876 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63425ae3-f835-4767-b6dd-f5519aa2b0ae-config-data\") pod \"nova-cell0-conductor-db-sync-wtbqn\" (UID: \"63425ae3-f835-4767-b6dd-f5519aa2b0ae\") " pod="openstack/nova-cell0-conductor-db-sync-wtbqn" Nov 21 19:21:12 crc kubenswrapper[4701]: I1121 19:21:12.139983 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63425ae3-f835-4767-b6dd-f5519aa2b0ae-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-wtbqn\" (UID: \"63425ae3-f835-4767-b6dd-f5519aa2b0ae\") " pod="openstack/nova-cell0-conductor-db-sync-wtbqn" Nov 21 19:21:12 crc kubenswrapper[4701]: I1121 19:21:12.140074 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hc6zt\" (UniqueName: \"kubernetes.io/projected/63425ae3-f835-4767-b6dd-f5519aa2b0ae-kube-api-access-hc6zt\") pod \"nova-cell0-conductor-db-sync-wtbqn\" (UID: \"63425ae3-f835-4767-b6dd-f5519aa2b0ae\") " pod="openstack/nova-cell0-conductor-db-sync-wtbqn" Nov 21 19:21:12 crc kubenswrapper[4701]: I1121 19:21:12.242524 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/63425ae3-f835-4767-b6dd-f5519aa2b0ae-scripts\") pod \"nova-cell0-conductor-db-sync-wtbqn\" (UID: \"63425ae3-f835-4767-b6dd-f5519aa2b0ae\") " pod="openstack/nova-cell0-conductor-db-sync-wtbqn" Nov 21 19:21:12 crc kubenswrapper[4701]: I1121 19:21:12.242576 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63425ae3-f835-4767-b6dd-f5519aa2b0ae-config-data\") pod \"nova-cell0-conductor-db-sync-wtbqn\" (UID: \"63425ae3-f835-4767-b6dd-f5519aa2b0ae\") " pod="openstack/nova-cell0-conductor-db-sync-wtbqn" Nov 21 19:21:12 crc kubenswrapper[4701]: I1121 19:21:12.242615 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63425ae3-f835-4767-b6dd-f5519aa2b0ae-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-wtbqn\" (UID: 
\"63425ae3-f835-4767-b6dd-f5519aa2b0ae\") " pod="openstack/nova-cell0-conductor-db-sync-wtbqn" Nov 21 19:21:12 crc kubenswrapper[4701]: I1121 19:21:12.242652 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hc6zt\" (UniqueName: \"kubernetes.io/projected/63425ae3-f835-4767-b6dd-f5519aa2b0ae-kube-api-access-hc6zt\") pod \"nova-cell0-conductor-db-sync-wtbqn\" (UID: \"63425ae3-f835-4767-b6dd-f5519aa2b0ae\") " pod="openstack/nova-cell0-conductor-db-sync-wtbqn" Nov 21 19:21:12 crc kubenswrapper[4701]: I1121 19:21:12.248045 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63425ae3-f835-4767-b6dd-f5519aa2b0ae-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-wtbqn\" (UID: \"63425ae3-f835-4767-b6dd-f5519aa2b0ae\") " pod="openstack/nova-cell0-conductor-db-sync-wtbqn" Nov 21 19:21:12 crc kubenswrapper[4701]: I1121 19:21:12.251875 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63425ae3-f835-4767-b6dd-f5519aa2b0ae-config-data\") pod \"nova-cell0-conductor-db-sync-wtbqn\" (UID: \"63425ae3-f835-4767-b6dd-f5519aa2b0ae\") " pod="openstack/nova-cell0-conductor-db-sync-wtbqn" Nov 21 19:21:12 crc kubenswrapper[4701]: I1121 19:21:12.252891 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/63425ae3-f835-4767-b6dd-f5519aa2b0ae-scripts\") pod \"nova-cell0-conductor-db-sync-wtbqn\" (UID: \"63425ae3-f835-4767-b6dd-f5519aa2b0ae\") " pod="openstack/nova-cell0-conductor-db-sync-wtbqn" Nov 21 19:21:12 crc kubenswrapper[4701]: I1121 19:21:12.261531 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hc6zt\" (UniqueName: \"kubernetes.io/projected/63425ae3-f835-4767-b6dd-f5519aa2b0ae-kube-api-access-hc6zt\") pod \"nova-cell0-conductor-db-sync-wtbqn\" (UID: \"63425ae3-f835-4767-b6dd-f5519aa2b0ae\") " pod="openstack/nova-cell0-conductor-db-sync-wtbqn" Nov 21 19:21:12 crc kubenswrapper[4701]: I1121 19:21:12.415797 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-wtbqn" Nov 21 19:21:13 crc kubenswrapper[4701]: W1121 19:21:13.189999 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod63425ae3_f835_4767_b6dd_f5519aa2b0ae.slice/crio-a39fd64723585193a458c05ac633c2fe08ea842d0af3e2e51a352241312c43ab WatchSource:0}: Error finding container a39fd64723585193a458c05ac633c2fe08ea842d0af3e2e51a352241312c43ab: Status 404 returned error can't find the container with id a39fd64723585193a458c05ac633c2fe08ea842d0af3e2e51a352241312c43ab Nov 21 19:21:13 crc kubenswrapper[4701]: I1121 19:21:13.192405 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-wtbqn"] Nov 21 19:21:13 crc kubenswrapper[4701]: I1121 19:21:13.753122 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-66cbbc6b59-4jhxd" Nov 21 19:21:13 crc kubenswrapper[4701]: I1121 19:21:13.756296 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-66cbbc6b59-4jhxd" Nov 21 19:21:13 crc kubenswrapper[4701]: I1121 19:21:13.972697 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c543587-173c-4fb2-b730-72b848f845d6" path="/var/lib/kubelet/pods/1c543587-173c-4fb2-b730-72b848f845d6/volumes" Nov 21 19:21:14 crc kubenswrapper[4701]: I1121 19:21:14.030621 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-wtbqn" event={"ID":"63425ae3-f835-4767-b6dd-f5519aa2b0ae","Type":"ContainerStarted","Data":"a39fd64723585193a458c05ac633c2fe08ea842d0af3e2e51a352241312c43ab"} Nov 21 19:21:16 crc kubenswrapper[4701]: I1121 19:21:16.918977 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:21:16 crc kubenswrapper[4701]: I1121 19:21:16.919570 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c236db90-f438-443f-8ec3-ac0b599bfbc0" containerName="ceilometer-central-agent" containerID="cri-o://266ee3d4178a9dfb5192a29b74f111ee210b6c26276757101f9d5147077ce212" gracePeriod=30 Nov 21 19:21:16 crc kubenswrapper[4701]: I1121 19:21:16.920053 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c236db90-f438-443f-8ec3-ac0b599bfbc0" containerName="proxy-httpd" containerID="cri-o://e67912a9dc67c88144e93a3f31bf9a54e2fe78d03e69a16bbda6039fd4eb5c1f" gracePeriod=30 Nov 21 19:21:16 crc kubenswrapper[4701]: I1121 19:21:16.920097 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c236db90-f438-443f-8ec3-ac0b599bfbc0" containerName="sg-core" containerID="cri-o://904528a00e48383a7e417835fc2a58fb3cdcb1da2ad118ff7917e840cf940403" gracePeriod=30 Nov 21 19:21:16 crc kubenswrapper[4701]: I1121 19:21:16.920128 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c236db90-f438-443f-8ec3-ac0b599bfbc0" containerName="ceilometer-notification-agent" containerID="cri-o://914382995af859f4d91baa0f9ed81e8112bebee52cbe07753bafe722e690abd5" gracePeriod=30 Nov 21 19:21:17 crc kubenswrapper[4701]: I1121 19:21:17.088169 4701 generic.go:334] "Generic (PLEG): container finished" podID="c236db90-f438-443f-8ec3-ac0b599bfbc0" containerID="904528a00e48383a7e417835fc2a58fb3cdcb1da2ad118ff7917e840cf940403" exitCode=2 Nov 21 19:21:17 crc 
kubenswrapper[4701]: I1121 19:21:17.088230 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c236db90-f438-443f-8ec3-ac0b599bfbc0","Type":"ContainerDied","Data":"904528a00e48383a7e417835fc2a58fb3cdcb1da2ad118ff7917e840cf940403"} Nov 21 19:21:17 crc kubenswrapper[4701]: I1121 19:21:17.170455 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 21 19:21:18 crc kubenswrapper[4701]: I1121 19:21:18.103700 4701 generic.go:334] "Generic (PLEG): container finished" podID="c236db90-f438-443f-8ec3-ac0b599bfbc0" containerID="e67912a9dc67c88144e93a3f31bf9a54e2fe78d03e69a16bbda6039fd4eb5c1f" exitCode=0 Nov 21 19:21:18 crc kubenswrapper[4701]: I1121 19:21:18.103741 4701 generic.go:334] "Generic (PLEG): container finished" podID="c236db90-f438-443f-8ec3-ac0b599bfbc0" containerID="914382995af859f4d91baa0f9ed81e8112bebee52cbe07753bafe722e690abd5" exitCode=0 Nov 21 19:21:18 crc kubenswrapper[4701]: I1121 19:21:18.103750 4701 generic.go:334] "Generic (PLEG): container finished" podID="c236db90-f438-443f-8ec3-ac0b599bfbc0" containerID="266ee3d4178a9dfb5192a29b74f111ee210b6c26276757101f9d5147077ce212" exitCode=0 Nov 21 19:21:18 crc kubenswrapper[4701]: I1121 19:21:18.103790 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c236db90-f438-443f-8ec3-ac0b599bfbc0","Type":"ContainerDied","Data":"e67912a9dc67c88144e93a3f31bf9a54e2fe78d03e69a16bbda6039fd4eb5c1f"} Nov 21 19:21:18 crc kubenswrapper[4701]: I1121 19:21:18.103901 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c236db90-f438-443f-8ec3-ac0b599bfbc0","Type":"ContainerDied","Data":"914382995af859f4d91baa0f9ed81e8112bebee52cbe07753bafe722e690abd5"} Nov 21 19:21:18 crc kubenswrapper[4701]: I1121 19:21:18.103925 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c236db90-f438-443f-8ec3-ac0b599bfbc0","Type":"ContainerDied","Data":"266ee3d4178a9dfb5192a29b74f111ee210b6c26276757101f9d5147077ce212"} Nov 21 19:21:18 crc kubenswrapper[4701]: I1121 19:21:18.614060 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 19:21:18 crc kubenswrapper[4701]: I1121 19:21:18.614587 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 19:21:18 crc kubenswrapper[4701]: I1121 19:21:18.614656 4701 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" Nov 21 19:21:18 crc kubenswrapper[4701]: I1121 19:21:18.615824 4701 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0522a5d31d2783b232fd70ced5acfdf22c3becfa61b128f650faf72c65913cd6"} pod="openshift-machine-config-operator/machine-config-daemon-tbszf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 19:21:18 crc kubenswrapper[4701]: I1121 19:21:18.615905 4701 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" containerID="cri-o://0522a5d31d2783b232fd70ced5acfdf22c3becfa61b128f650faf72c65913cd6" gracePeriod=600 Nov 21 19:21:19 crc kubenswrapper[4701]: I1121 19:21:19.122839 4701 generic.go:334] "Generic (PLEG): container finished" podID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerID="0522a5d31d2783b232fd70ced5acfdf22c3becfa61b128f650faf72c65913cd6" exitCode=0 Nov 21 19:21:19 crc kubenswrapper[4701]: I1121 19:21:19.122930 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerDied","Data":"0522a5d31d2783b232fd70ced5acfdf22c3becfa61b128f650faf72c65913cd6"} Nov 21 19:21:19 crc kubenswrapper[4701]: I1121 19:21:19.122988 4701 scope.go:117] "RemoveContainer" containerID="76a0edd10d4f17051fb2f677c020a3884e840a31cfe72eb7d10bdd5a1c9d63b1" Nov 21 19:21:21 crc kubenswrapper[4701]: I1121 19:21:21.955227 4701 scope.go:117] "RemoveContainer" containerID="a0ec6d1a2fc828c1e5eac769e653fa0e9805be850a5d554ea13660925e01ccf6" Nov 21 19:21:24 crc kubenswrapper[4701]: I1121 19:21:24.361941 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 19:21:24 crc kubenswrapper[4701]: I1121 19:21:24.364251 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="07d6bc32-28a2-40bf-bd2e-9454dfaac91f" containerName="glance-log" containerID="cri-o://803760a56a2a27c3e07d50ac5e35e05f32afa6e79b2155c799be6eb22e06c580" gracePeriod=30 Nov 21 19:21:24 crc kubenswrapper[4701]: I1121 19:21:24.364487 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="07d6bc32-28a2-40bf-bd2e-9454dfaac91f" containerName="glance-httpd" containerID="cri-o://51cd7ff1d8dd2ee7688bd11d6def9bbfc0bf26e2008f3397f468491c8935dae4" gracePeriod=30 Nov 21 19:21:24 crc kubenswrapper[4701]: I1121 19:21:24.756307 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 21 19:21:24 crc kubenswrapper[4701]: I1121 19:21:24.878244 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c236db90-f438-443f-8ec3-ac0b599bfbc0-log-httpd\") pod \"c236db90-f438-443f-8ec3-ac0b599bfbc0\" (UID: \"c236db90-f438-443f-8ec3-ac0b599bfbc0\") " Nov 21 19:21:24 crc kubenswrapper[4701]: I1121 19:21:24.878669 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg6vq\" (UniqueName: \"kubernetes.io/projected/c236db90-f438-443f-8ec3-ac0b599bfbc0-kube-api-access-mg6vq\") pod \"c236db90-f438-443f-8ec3-ac0b599bfbc0\" (UID: \"c236db90-f438-443f-8ec3-ac0b599bfbc0\") " Nov 21 19:21:24 crc kubenswrapper[4701]: I1121 19:21:24.878774 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c236db90-f438-443f-8ec3-ac0b599bfbc0-combined-ca-bundle\") pod \"c236db90-f438-443f-8ec3-ac0b599bfbc0\" (UID: \"c236db90-f438-443f-8ec3-ac0b599bfbc0\") " Nov 21 19:21:24 crc kubenswrapper[4701]: I1121 19:21:24.878800 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c236db90-f438-443f-8ec3-ac0b599bfbc0-run-httpd\") pod \"c236db90-f438-443f-8ec3-ac0b599bfbc0\" (UID: \"c236db90-f438-443f-8ec3-ac0b599bfbc0\") " Nov 21 19:21:24 crc kubenswrapper[4701]: I1121 19:21:24.878830 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c236db90-f438-443f-8ec3-ac0b599bfbc0-config-data\") pod \"c236db90-f438-443f-8ec3-ac0b599bfbc0\" (UID: \"c236db90-f438-443f-8ec3-ac0b599bfbc0\") " Nov 21 19:21:24 crc kubenswrapper[4701]: I1121 19:21:24.878897 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c236db90-f438-443f-8ec3-ac0b599bfbc0-scripts\") pod \"c236db90-f438-443f-8ec3-ac0b599bfbc0\" (UID: \"c236db90-f438-443f-8ec3-ac0b599bfbc0\") " Nov 21 19:21:24 crc kubenswrapper[4701]: I1121 19:21:24.878944 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c236db90-f438-443f-8ec3-ac0b599bfbc0-sg-core-conf-yaml\") pod \"c236db90-f438-443f-8ec3-ac0b599bfbc0\" (UID: \"c236db90-f438-443f-8ec3-ac0b599bfbc0\") " Nov 21 19:21:24 crc kubenswrapper[4701]: I1121 19:21:24.879487 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c236db90-f438-443f-8ec3-ac0b599bfbc0-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "c236db90-f438-443f-8ec3-ac0b599bfbc0" (UID: "c236db90-f438-443f-8ec3-ac0b599bfbc0"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:21:24 crc kubenswrapper[4701]: I1121 19:21:24.879763 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c236db90-f438-443f-8ec3-ac0b599bfbc0-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "c236db90-f438-443f-8ec3-ac0b599bfbc0" (UID: "c236db90-f438-443f-8ec3-ac0b599bfbc0"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:21:24 crc kubenswrapper[4701]: I1121 19:21:24.894372 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c236db90-f438-443f-8ec3-ac0b599bfbc0-scripts" (OuterVolumeSpecName: "scripts") pod "c236db90-f438-443f-8ec3-ac0b599bfbc0" (UID: "c236db90-f438-443f-8ec3-ac0b599bfbc0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:21:24 crc kubenswrapper[4701]: I1121 19:21:24.894481 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c236db90-f438-443f-8ec3-ac0b599bfbc0-kube-api-access-mg6vq" (OuterVolumeSpecName: "kube-api-access-mg6vq") pod "c236db90-f438-443f-8ec3-ac0b599bfbc0" (UID: "c236db90-f438-443f-8ec3-ac0b599bfbc0"). InnerVolumeSpecName "kube-api-access-mg6vq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:21:24 crc kubenswrapper[4701]: I1121 19:21:24.921768 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c236db90-f438-443f-8ec3-ac0b599bfbc0-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "c236db90-f438-443f-8ec3-ac0b599bfbc0" (UID: "c236db90-f438-443f-8ec3-ac0b599bfbc0"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:21:24 crc kubenswrapper[4701]: I1121 19:21:24.982185 4701 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c236db90-f438-443f-8ec3-ac0b599bfbc0-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:24 crc kubenswrapper[4701]: I1121 19:21:24.982246 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg6vq\" (UniqueName: \"kubernetes.io/projected/c236db90-f438-443f-8ec3-ac0b599bfbc0-kube-api-access-mg6vq\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:24 crc kubenswrapper[4701]: I1121 19:21:24.982263 4701 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c236db90-f438-443f-8ec3-ac0b599bfbc0-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:24 crc kubenswrapper[4701]: I1121 19:21:24.982272 4701 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c236db90-f438-443f-8ec3-ac0b599bfbc0-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:24 crc kubenswrapper[4701]: I1121 19:21:24.982282 4701 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c236db90-f438-443f-8ec3-ac0b599bfbc0-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.018392 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c236db90-f438-443f-8ec3-ac0b599bfbc0-config-data" (OuterVolumeSpecName: "config-data") pod "c236db90-f438-443f-8ec3-ac0b599bfbc0" (UID: "c236db90-f438-443f-8ec3-ac0b599bfbc0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.020389 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c236db90-f438-443f-8ec3-ac0b599bfbc0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c236db90-f438-443f-8ec3-ac0b599bfbc0" (UID: "c236db90-f438-443f-8ec3-ac0b599bfbc0"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.083936 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c236db90-f438-443f-8ec3-ac0b599bfbc0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.083998 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c236db90-f438-443f-8ec3-ac0b599bfbc0-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.183534 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-wtbqn" event={"ID":"63425ae3-f835-4767-b6dd-f5519aa2b0ae","Type":"ContainerStarted","Data":"3c57170e69e5dee3f6a3e241c55d44b8d8a5b743b6c39b137e0d778e32cf6b3e"} Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.188084 4701 generic.go:334] "Generic (PLEG): container finished" podID="07d6bc32-28a2-40bf-bd2e-9454dfaac91f" containerID="803760a56a2a27c3e07d50ac5e35e05f32afa6e79b2155c799be6eb22e06c580" exitCode=143 Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.188180 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"07d6bc32-28a2-40bf-bd2e-9454dfaac91f","Type":"ContainerDied","Data":"803760a56a2a27c3e07d50ac5e35e05f32afa6e79b2155c799be6eb22e06c580"} Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.190829 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"8a7a5be4-96a4-4574-9839-2d0576595305","Type":"ContainerStarted","Data":"4cdc4428ba39945a4fd78e922276c6bc963cbf50eda5e013f6a2e1db189e1833"} Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.194262 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.194296 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c236db90-f438-443f-8ec3-ac0b599bfbc0","Type":"ContainerDied","Data":"e5371625664039ef003ccd6ecc409aa4ff50f10657fe501bda6fa82bf8e1a4e4"} Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.194337 4701 scope.go:117] "RemoveContainer" containerID="e67912a9dc67c88144e93a3f31bf9a54e2fe78d03e69a16bbda6039fd4eb5c1f" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.202630 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerStarted","Data":"d6fe7caeaff234a352e7f7e2aad2e24b43f59b2b97fc616eef788494436369d1"} Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.208399 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-wtbqn" podStartSLOduration=1.948614778 podStartE2EDuration="13.20837712s" podCreationTimestamp="2025-11-21 19:21:12 +0000 UTC" firstStartedPulling="2025-11-21 19:21:13.191756006 +0000 UTC m=+1163.976896033" lastFinishedPulling="2025-11-21 19:21:24.451518348 +0000 UTC m=+1175.236658375" observedRunningTime="2025-11-21 19:21:25.199543413 +0000 UTC m=+1175.984683450" watchObservedRunningTime="2025-11-21 19:21:25.20837712 +0000 UTC m=+1175.993517147" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.219341 4701 scope.go:117] "RemoveContainer" containerID="904528a00e48383a7e417835fc2a58fb3cdcb1da2ad118ff7917e840cf940403" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.240562 4701 scope.go:117] "RemoveContainer" containerID="914382995af859f4d91baa0f9ed81e8112bebee52cbe07753bafe722e690abd5" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.273130 4701 scope.go:117] "RemoveContainer" containerID="266ee3d4178a9dfb5192a29b74f111ee210b6c26276757101f9d5147077ce212" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.285133 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.292414 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.340151 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:21:25 crc kubenswrapper[4701]: E1121 19:21:25.341239 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c236db90-f438-443f-8ec3-ac0b599bfbc0" containerName="proxy-httpd" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.341260 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="c236db90-f438-443f-8ec3-ac0b599bfbc0" containerName="proxy-httpd" Nov 21 19:21:25 crc kubenswrapper[4701]: E1121 19:21:25.341271 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c236db90-f438-443f-8ec3-ac0b599bfbc0" containerName="ceilometer-central-agent" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.341278 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="c236db90-f438-443f-8ec3-ac0b599bfbc0" containerName="ceilometer-central-agent" Nov 21 19:21:25 crc kubenswrapper[4701]: E1121 19:21:25.341299 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c236db90-f438-443f-8ec3-ac0b599bfbc0" containerName="sg-core" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.341305 4701 
state_mem.go:107] "Deleted CPUSet assignment" podUID="c236db90-f438-443f-8ec3-ac0b599bfbc0" containerName="sg-core" Nov 21 19:21:25 crc kubenswrapper[4701]: E1121 19:21:25.341343 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c236db90-f438-443f-8ec3-ac0b599bfbc0" containerName="ceilometer-notification-agent" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.341352 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="c236db90-f438-443f-8ec3-ac0b599bfbc0" containerName="ceilometer-notification-agent" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.341544 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="c236db90-f438-443f-8ec3-ac0b599bfbc0" containerName="sg-core" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.341564 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="c236db90-f438-443f-8ec3-ac0b599bfbc0" containerName="proxy-httpd" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.341579 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="c236db90-f438-443f-8ec3-ac0b599bfbc0" containerName="ceilometer-central-agent" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.341600 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="c236db90-f438-443f-8ec3-ac0b599bfbc0" containerName="ceilometer-notification-agent" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.343664 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.349684 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.349933 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.358535 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.389043 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/936744f0-7e7e-46bc-8534-29b07d74fd07-scripts\") pod \"ceilometer-0\" (UID: \"936744f0-7e7e-46bc-8534-29b07d74fd07\") " pod="openstack/ceilometer-0" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.389110 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/936744f0-7e7e-46bc-8534-29b07d74fd07-log-httpd\") pod \"ceilometer-0\" (UID: \"936744f0-7e7e-46bc-8534-29b07d74fd07\") " pod="openstack/ceilometer-0" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.389151 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/936744f0-7e7e-46bc-8534-29b07d74fd07-run-httpd\") pod \"ceilometer-0\" (UID: \"936744f0-7e7e-46bc-8534-29b07d74fd07\") " pod="openstack/ceilometer-0" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.389179 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/936744f0-7e7e-46bc-8534-29b07d74fd07-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"936744f0-7e7e-46bc-8534-29b07d74fd07\") " pod="openstack/ceilometer-0" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.389351 4701 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/936744f0-7e7e-46bc-8534-29b07d74fd07-config-data\") pod \"ceilometer-0\" (UID: \"936744f0-7e7e-46bc-8534-29b07d74fd07\") " pod="openstack/ceilometer-0" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.389374 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v2xnk\" (UniqueName: \"kubernetes.io/projected/936744f0-7e7e-46bc-8534-29b07d74fd07-kube-api-access-v2xnk\") pod \"ceilometer-0\" (UID: \"936744f0-7e7e-46bc-8534-29b07d74fd07\") " pod="openstack/ceilometer-0" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.389403 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/936744f0-7e7e-46bc-8534-29b07d74fd07-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"936744f0-7e7e-46bc-8534-29b07d74fd07\") " pod="openstack/ceilometer-0" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.490679 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/936744f0-7e7e-46bc-8534-29b07d74fd07-scripts\") pod \"ceilometer-0\" (UID: \"936744f0-7e7e-46bc-8534-29b07d74fd07\") " pod="openstack/ceilometer-0" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.490747 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/936744f0-7e7e-46bc-8534-29b07d74fd07-log-httpd\") pod \"ceilometer-0\" (UID: \"936744f0-7e7e-46bc-8534-29b07d74fd07\") " pod="openstack/ceilometer-0" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.490788 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/936744f0-7e7e-46bc-8534-29b07d74fd07-run-httpd\") pod \"ceilometer-0\" (UID: \"936744f0-7e7e-46bc-8534-29b07d74fd07\") " pod="openstack/ceilometer-0" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.490810 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/936744f0-7e7e-46bc-8534-29b07d74fd07-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"936744f0-7e7e-46bc-8534-29b07d74fd07\") " pod="openstack/ceilometer-0" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.490836 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/936744f0-7e7e-46bc-8534-29b07d74fd07-config-data\") pod \"ceilometer-0\" (UID: \"936744f0-7e7e-46bc-8534-29b07d74fd07\") " pod="openstack/ceilometer-0" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.490854 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v2xnk\" (UniqueName: \"kubernetes.io/projected/936744f0-7e7e-46bc-8534-29b07d74fd07-kube-api-access-v2xnk\") pod \"ceilometer-0\" (UID: \"936744f0-7e7e-46bc-8534-29b07d74fd07\") " pod="openstack/ceilometer-0" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.490875 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/936744f0-7e7e-46bc-8534-29b07d74fd07-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"936744f0-7e7e-46bc-8534-29b07d74fd07\") " pod="openstack/ceilometer-0" Nov 21 
19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.491858 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/936744f0-7e7e-46bc-8534-29b07d74fd07-run-httpd\") pod \"ceilometer-0\" (UID: \"936744f0-7e7e-46bc-8534-29b07d74fd07\") " pod="openstack/ceilometer-0" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.492132 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/936744f0-7e7e-46bc-8534-29b07d74fd07-log-httpd\") pod \"ceilometer-0\" (UID: \"936744f0-7e7e-46bc-8534-29b07d74fd07\") " pod="openstack/ceilometer-0" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.499042 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/936744f0-7e7e-46bc-8534-29b07d74fd07-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"936744f0-7e7e-46bc-8534-29b07d74fd07\") " pod="openstack/ceilometer-0" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.499306 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/936744f0-7e7e-46bc-8534-29b07d74fd07-scripts\") pod \"ceilometer-0\" (UID: \"936744f0-7e7e-46bc-8534-29b07d74fd07\") " pod="openstack/ceilometer-0" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.502303 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/936744f0-7e7e-46bc-8534-29b07d74fd07-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"936744f0-7e7e-46bc-8534-29b07d74fd07\") " pod="openstack/ceilometer-0" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.510297 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/936744f0-7e7e-46bc-8534-29b07d74fd07-config-data\") pod \"ceilometer-0\" (UID: \"936744f0-7e7e-46bc-8534-29b07d74fd07\") " pod="openstack/ceilometer-0" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.514945 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v2xnk\" (UniqueName: \"kubernetes.io/projected/936744f0-7e7e-46bc-8534-29b07d74fd07-kube-api-access-v2xnk\") pod \"ceilometer-0\" (UID: \"936744f0-7e7e-46bc-8534-29b07d74fd07\") " pod="openstack/ceilometer-0" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.671708 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 21 19:21:25 crc kubenswrapper[4701]: I1121 19:21:25.968447 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c236db90-f438-443f-8ec3-ac0b599bfbc0" path="/var/lib/kubelet/pods/c236db90-f438-443f-8ec3-ac0b599bfbc0/volumes" Nov 21 19:21:26 crc kubenswrapper[4701]: I1121 19:21:26.208598 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:21:26 crc kubenswrapper[4701]: I1121 19:21:26.234378 4701 generic.go:334] "Generic (PLEG): container finished" podID="07d6bc32-28a2-40bf-bd2e-9454dfaac91f" containerID="51cd7ff1d8dd2ee7688bd11d6def9bbfc0bf26e2008f3397f468491c8935dae4" exitCode=0 Nov 21 19:21:26 crc kubenswrapper[4701]: I1121 19:21:26.234516 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"07d6bc32-28a2-40bf-bd2e-9454dfaac91f","Type":"ContainerDied","Data":"51cd7ff1d8dd2ee7688bd11d6def9bbfc0bf26e2008f3397f468491c8935dae4"} Nov 21 19:21:26 crc kubenswrapper[4701]: I1121 19:21:26.263087 4701 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 19:21:26 crc kubenswrapper[4701]: I1121 19:21:26.484574 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Nov 21 19:21:26 crc kubenswrapper[4701]: I1121 19:21:26.484986 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0" Nov 21 19:21:26 crc kubenswrapper[4701]: I1121 19:21:26.535768 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 19:21:26 crc kubenswrapper[4701]: I1121 19:21:26.536039 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="d5b0816b-f10a-4e0a-86bc-3f0eda5253a6" containerName="glance-log" containerID="cri-o://d51ba6844549ff4e635dc6a826803800a634850d1c97119683ae52f8c6de2946" gracePeriod=30 Nov 21 19:21:26 crc kubenswrapper[4701]: I1121 19:21:26.536698 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="d5b0816b-f10a-4e0a-86bc-3f0eda5253a6" containerName="glance-httpd" containerID="cri-o://e5dd62460d381b928eb7d0e39d1a32367e929fc7e8a19f0e97ec53f0e28f9a3e" gracePeriod=30 Nov 21 19:21:26 crc kubenswrapper[4701]: I1121 19:21:26.562236 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-decision-engine-0" Nov 21 19:21:26 crc kubenswrapper[4701]: I1121 19:21:26.682664 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 21 19:21:26 crc kubenswrapper[4701]: I1121 19:21:26.738326 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-scripts\") pod \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\" (UID: \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\") " Nov 21 19:21:26 crc kubenswrapper[4701]: I1121 19:21:26.738498 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\" (UID: \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\") " Nov 21 19:21:26 crc kubenswrapper[4701]: I1121 19:21:26.738543 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-public-tls-certs\") pod \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\" (UID: \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\") " Nov 21 19:21:26 crc kubenswrapper[4701]: I1121 19:21:26.738567 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cchn7\" (UniqueName: \"kubernetes.io/projected/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-kube-api-access-cchn7\") pod \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\" (UID: \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\") " Nov 21 19:21:26 crc kubenswrapper[4701]: I1121 19:21:26.738620 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-combined-ca-bundle\") pod \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\" (UID: \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\") " Nov 21 19:21:26 crc kubenswrapper[4701]: I1121 19:21:26.738678 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-logs\") pod \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\" (UID: \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\") " Nov 21 19:21:26 crc kubenswrapper[4701]: I1121 19:21:26.738740 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-httpd-run\") pod \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\" (UID: \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\") " Nov 21 19:21:26 crc kubenswrapper[4701]: I1121 19:21:26.738794 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-config-data\") pod \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\" (UID: \"07d6bc32-28a2-40bf-bd2e-9454dfaac91f\") " Nov 21 19:21:26 crc kubenswrapper[4701]: I1121 19:21:26.740620 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-logs" (OuterVolumeSpecName: "logs") pod "07d6bc32-28a2-40bf-bd2e-9454dfaac91f" (UID: "07d6bc32-28a2-40bf-bd2e-9454dfaac91f"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:21:26 crc kubenswrapper[4701]: I1121 19:21:26.756675 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "07d6bc32-28a2-40bf-bd2e-9454dfaac91f" (UID: "07d6bc32-28a2-40bf-bd2e-9454dfaac91f"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:21:26 crc kubenswrapper[4701]: I1121 19:21:26.760703 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "glance") pod "07d6bc32-28a2-40bf-bd2e-9454dfaac91f" (UID: "07d6bc32-28a2-40bf-bd2e-9454dfaac91f"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 21 19:21:26 crc kubenswrapper[4701]: I1121 19:21:26.762387 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-scripts" (OuterVolumeSpecName: "scripts") pod "07d6bc32-28a2-40bf-bd2e-9454dfaac91f" (UID: "07d6bc32-28a2-40bf-bd2e-9454dfaac91f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:21:26 crc kubenswrapper[4701]: I1121 19:21:26.765937 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-kube-api-access-cchn7" (OuterVolumeSpecName: "kube-api-access-cchn7") pod "07d6bc32-28a2-40bf-bd2e-9454dfaac91f" (UID: "07d6bc32-28a2-40bf-bd2e-9454dfaac91f"). InnerVolumeSpecName "kube-api-access-cchn7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:21:26 crc kubenswrapper[4701]: I1121 19:21:26.796690 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "07d6bc32-28a2-40bf-bd2e-9454dfaac91f" (UID: "07d6bc32-28a2-40bf-bd2e-9454dfaac91f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:21:26 crc kubenswrapper[4701]: I1121 19:21:26.841424 4701 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:26 crc kubenswrapper[4701]: I1121 19:21:26.841497 4701 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:26 crc kubenswrapper[4701]: I1121 19:21:26.841546 4701 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Nov 21 19:21:26 crc kubenswrapper[4701]: I1121 19:21:26.841560 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cchn7\" (UniqueName: \"kubernetes.io/projected/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-kube-api-access-cchn7\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:26 crc kubenswrapper[4701]: I1121 19:21:26.841577 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:26 crc kubenswrapper[4701]: I1121 19:21:26.841589 4701 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-logs\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:26 crc kubenswrapper[4701]: I1121 19:21:26.911824 4701 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Nov 21 19:21:26 crc kubenswrapper[4701]: I1121 19:21:26.940121 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-config-data" (OuterVolumeSpecName: "config-data") pod "07d6bc32-28a2-40bf-bd2e-9454dfaac91f" (UID: "07d6bc32-28a2-40bf-bd2e-9454dfaac91f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:21:26 crc kubenswrapper[4701]: I1121 19:21:26.948661 4701 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:26 crc kubenswrapper[4701]: I1121 19:21:26.950765 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:26 crc kubenswrapper[4701]: I1121 19:21:26.960564 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "07d6bc32-28a2-40bf-bd2e-9454dfaac91f" (UID: "07d6bc32-28a2-40bf-bd2e-9454dfaac91f"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.052987 4701 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/07d6bc32-28a2-40bf-bd2e-9454dfaac91f-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.254685 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"936744f0-7e7e-46bc-8534-29b07d74fd07","Type":"ContainerStarted","Data":"ded90fe47222044d06dc5f3ae3b8baac64d86526b6be22cf5dfee29c54fc4a2e"} Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.255250 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"936744f0-7e7e-46bc-8534-29b07d74fd07","Type":"ContainerStarted","Data":"5b8cde21d3ba57ae8673717035a39b8d46c30b77c088179820be35fd169a4c14"} Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.261374 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.261393 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"07d6bc32-28a2-40bf-bd2e-9454dfaac91f","Type":"ContainerDied","Data":"027dbad315dc1cea733237b8d6c9bac7e53b1dccb09b2f3be95ad24b224a276d"} Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.261474 4701 scope.go:117] "RemoveContainer" containerID="51cd7ff1d8dd2ee7688bd11d6def9bbfc0bf26e2008f3397f468491c8935dae4" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.269349 4701 generic.go:334] "Generic (PLEG): container finished" podID="d5b0816b-f10a-4e0a-86bc-3f0eda5253a6" containerID="d51ba6844549ff4e635dc6a826803800a634850d1c97119683ae52f8c6de2946" exitCode=143 Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.269470 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6","Type":"ContainerDied","Data":"d51ba6844549ff4e635dc6a826803800a634850d1c97119683ae52f8c6de2946"} Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.303826 4701 scope.go:117] "RemoveContainer" containerID="803760a56a2a27c3e07d50ac5e35e05f32afa6e79b2155c799be6eb22e06c580" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.314129 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-decision-engine-0" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.384598 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.430309 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.453354 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.463558 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 19:21:27 crc kubenswrapper[4701]: E1121 19:21:27.464052 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07d6bc32-28a2-40bf-bd2e-9454dfaac91f" containerName="glance-httpd" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.464070 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="07d6bc32-28a2-40bf-bd2e-9454dfaac91f" 
containerName="glance-httpd" Nov 21 19:21:27 crc kubenswrapper[4701]: E1121 19:21:27.464124 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07d6bc32-28a2-40bf-bd2e-9454dfaac91f" containerName="glance-log" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.464146 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="07d6bc32-28a2-40bf-bd2e-9454dfaac91f" containerName="glance-log" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.464500 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="07d6bc32-28a2-40bf-bd2e-9454dfaac91f" containerName="glance-log" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.464540 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="07d6bc32-28a2-40bf-bd2e-9454dfaac91f" containerName="glance-httpd" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.465575 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.468609 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.476530 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.489048 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.572922 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b405de0f-6523-4d69-b8a7-a73528f0df37-scripts\") pod \"glance-default-external-api-0\" (UID: \"b405de0f-6523-4d69-b8a7-a73528f0df37\") " pod="openstack/glance-default-external-api-0" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.573043 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b405de0f-6523-4d69-b8a7-a73528f0df37-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"b405de0f-6523-4d69-b8a7-a73528f0df37\") " pod="openstack/glance-default-external-api-0" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.573081 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b405de0f-6523-4d69-b8a7-a73528f0df37-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"b405de0f-6523-4d69-b8a7-a73528f0df37\") " pod="openstack/glance-default-external-api-0" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.573104 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b405de0f-6523-4d69-b8a7-a73528f0df37-logs\") pod \"glance-default-external-api-0\" (UID: \"b405de0f-6523-4d69-b8a7-a73528f0df37\") " pod="openstack/glance-default-external-api-0" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.573155 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"b405de0f-6523-4d69-b8a7-a73528f0df37\") " pod="openstack/glance-default-external-api-0" Nov 21 19:21:27 crc kubenswrapper[4701]: 
I1121 19:21:27.573189 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b405de0f-6523-4d69-b8a7-a73528f0df37-config-data\") pod \"glance-default-external-api-0\" (UID: \"b405de0f-6523-4d69-b8a7-a73528f0df37\") " pod="openstack/glance-default-external-api-0" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.573222 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b405de0f-6523-4d69-b8a7-a73528f0df37-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"b405de0f-6523-4d69-b8a7-a73528f0df37\") " pod="openstack/glance-default-external-api-0" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.573246 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f4vj6\" (UniqueName: \"kubernetes.io/projected/b405de0f-6523-4d69-b8a7-a73528f0df37-kube-api-access-f4vj6\") pod \"glance-default-external-api-0\" (UID: \"b405de0f-6523-4d69-b8a7-a73528f0df37\") " pod="openstack/glance-default-external-api-0" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.674861 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b405de0f-6523-4d69-b8a7-a73528f0df37-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"b405de0f-6523-4d69-b8a7-a73528f0df37\") " pod="openstack/glance-default-external-api-0" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.674964 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b405de0f-6523-4d69-b8a7-a73528f0df37-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"b405de0f-6523-4d69-b8a7-a73528f0df37\") " pod="openstack/glance-default-external-api-0" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.674996 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b405de0f-6523-4d69-b8a7-a73528f0df37-logs\") pod \"glance-default-external-api-0\" (UID: \"b405de0f-6523-4d69-b8a7-a73528f0df37\") " pod="openstack/glance-default-external-api-0" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.675055 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"b405de0f-6523-4d69-b8a7-a73528f0df37\") " pod="openstack/glance-default-external-api-0" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.675090 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b405de0f-6523-4d69-b8a7-a73528f0df37-config-data\") pod \"glance-default-external-api-0\" (UID: \"b405de0f-6523-4d69-b8a7-a73528f0df37\") " pod="openstack/glance-default-external-api-0" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.675113 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b405de0f-6523-4d69-b8a7-a73528f0df37-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"b405de0f-6523-4d69-b8a7-a73528f0df37\") " pod="openstack/glance-default-external-api-0" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.675139 4701 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f4vj6\" (UniqueName: \"kubernetes.io/projected/b405de0f-6523-4d69-b8a7-a73528f0df37-kube-api-access-f4vj6\") pod \"glance-default-external-api-0\" (UID: \"b405de0f-6523-4d69-b8a7-a73528f0df37\") " pod="openstack/glance-default-external-api-0" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.675216 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b405de0f-6523-4d69-b8a7-a73528f0df37-scripts\") pod \"glance-default-external-api-0\" (UID: \"b405de0f-6523-4d69-b8a7-a73528f0df37\") " pod="openstack/glance-default-external-api-0" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.675963 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b405de0f-6523-4d69-b8a7-a73528f0df37-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"b405de0f-6523-4d69-b8a7-a73528f0df37\") " pod="openstack/glance-default-external-api-0" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.675989 4701 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"b405de0f-6523-4d69-b8a7-a73528f0df37\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/glance-default-external-api-0" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.676093 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b405de0f-6523-4d69-b8a7-a73528f0df37-logs\") pod \"glance-default-external-api-0\" (UID: \"b405de0f-6523-4d69-b8a7-a73528f0df37\") " pod="openstack/glance-default-external-api-0" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.685442 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b405de0f-6523-4d69-b8a7-a73528f0df37-config-data\") pod \"glance-default-external-api-0\" (UID: \"b405de0f-6523-4d69-b8a7-a73528f0df37\") " pod="openstack/glance-default-external-api-0" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.686312 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b405de0f-6523-4d69-b8a7-a73528f0df37-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"b405de0f-6523-4d69-b8a7-a73528f0df37\") " pod="openstack/glance-default-external-api-0" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.692767 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b405de0f-6523-4d69-b8a7-a73528f0df37-scripts\") pod \"glance-default-external-api-0\" (UID: \"b405de0f-6523-4d69-b8a7-a73528f0df37\") " pod="openstack/glance-default-external-api-0" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.695740 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b405de0f-6523-4d69-b8a7-a73528f0df37-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"b405de0f-6523-4d69-b8a7-a73528f0df37\") " pod="openstack/glance-default-external-api-0" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.699377 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f4vj6\" (UniqueName: 
\"kubernetes.io/projected/b405de0f-6523-4d69-b8a7-a73528f0df37-kube-api-access-f4vj6\") pod \"glance-default-external-api-0\" (UID: \"b405de0f-6523-4d69-b8a7-a73528f0df37\") " pod="openstack/glance-default-external-api-0" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.741382 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"b405de0f-6523-4d69-b8a7-a73528f0df37\") " pod="openstack/glance-default-external-api-0" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.817946 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.929655 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:21:27 crc kubenswrapper[4701]: I1121 19:21:27.965409 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="07d6bc32-28a2-40bf-bd2e-9454dfaac91f" path="/var/lib/kubelet/pods/07d6bc32-28a2-40bf-bd2e-9454dfaac91f/volumes" Nov 21 19:21:28 crc kubenswrapper[4701]: I1121 19:21:28.291049 4701 generic.go:334] "Generic (PLEG): container finished" podID="d5b0816b-f10a-4e0a-86bc-3f0eda5253a6" containerID="e5dd62460d381b928eb7d0e39d1a32367e929fc7e8a19f0e97ec53f0e28f9a3e" exitCode=0 Nov 21 19:21:28 crc kubenswrapper[4701]: I1121 19:21:28.291475 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6","Type":"ContainerDied","Data":"e5dd62460d381b928eb7d0e39d1a32367e929fc7e8a19f0e97ec53f0e28f9a3e"} Nov 21 19:21:28 crc kubenswrapper[4701]: I1121 19:21:28.303524 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"936744f0-7e7e-46bc-8534-29b07d74fd07","Type":"ContainerStarted","Data":"703c53a0745a6913bcdfd9f1347df0be58d3c05200feea2a2723332cb30e6d66"} Nov 21 19:21:28 crc kubenswrapper[4701]: I1121 19:21:28.456121 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 21 19:21:28 crc kubenswrapper[4701]: I1121 19:21:28.507157 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-internal-tls-certs\") pod \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\" (UID: \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\") " Nov 21 19:21:28 crc kubenswrapper[4701]: I1121 19:21:28.507411 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-logs\") pod \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\" (UID: \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\") " Nov 21 19:21:28 crc kubenswrapper[4701]: I1121 19:21:28.507448 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-scripts\") pod \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\" (UID: \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\") " Nov 21 19:21:28 crc kubenswrapper[4701]: I1121 19:21:28.507478 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-combined-ca-bundle\") pod \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\" (UID: \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\") " Nov 21 19:21:28 crc kubenswrapper[4701]: I1121 19:21:28.507601 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j8flk\" (UniqueName: \"kubernetes.io/projected/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-kube-api-access-j8flk\") pod \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\" (UID: \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\") " Nov 21 19:21:28 crc kubenswrapper[4701]: I1121 19:21:28.507728 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\" (UID: \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\") " Nov 21 19:21:28 crc kubenswrapper[4701]: I1121 19:21:28.507791 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-config-data\") pod \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\" (UID: \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\") " Nov 21 19:21:28 crc kubenswrapper[4701]: I1121 19:21:28.507828 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-httpd-run\") pod \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\" (UID: \"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6\") " Nov 21 19:21:28 crc kubenswrapper[4701]: I1121 19:21:28.508302 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-logs" (OuterVolumeSpecName: "logs") pod "d5b0816b-f10a-4e0a-86bc-3f0eda5253a6" (UID: "d5b0816b-f10a-4e0a-86bc-3f0eda5253a6"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:21:28 crc kubenswrapper[4701]: I1121 19:21:28.511497 4701 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-logs\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:28 crc kubenswrapper[4701]: I1121 19:21:28.515608 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "d5b0816b-f10a-4e0a-86bc-3f0eda5253a6" (UID: "d5b0816b-f10a-4e0a-86bc-3f0eda5253a6"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:21:28 crc kubenswrapper[4701]: I1121 19:21:28.516229 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 19:21:28 crc kubenswrapper[4701]: I1121 19:21:28.517098 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "glance") pod "d5b0816b-f10a-4e0a-86bc-3f0eda5253a6" (UID: "d5b0816b-f10a-4e0a-86bc-3f0eda5253a6"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 21 19:21:28 crc kubenswrapper[4701]: I1121 19:21:28.566767 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-scripts" (OuterVolumeSpecName: "scripts") pod "d5b0816b-f10a-4e0a-86bc-3f0eda5253a6" (UID: "d5b0816b-f10a-4e0a-86bc-3f0eda5253a6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:21:28 crc kubenswrapper[4701]: I1121 19:21:28.567782 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-kube-api-access-j8flk" (OuterVolumeSpecName: "kube-api-access-j8flk") pod "d5b0816b-f10a-4e0a-86bc-3f0eda5253a6" (UID: "d5b0816b-f10a-4e0a-86bc-3f0eda5253a6"). InnerVolumeSpecName "kube-api-access-j8flk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:21:28 crc kubenswrapper[4701]: I1121 19:21:28.626880 4701 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Nov 21 19:21:28 crc kubenswrapper[4701]: I1121 19:21:28.626915 4701 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:28 crc kubenswrapper[4701]: I1121 19:21:28.626927 4701 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:28 crc kubenswrapper[4701]: I1121 19:21:28.626937 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j8flk\" (UniqueName: \"kubernetes.io/projected/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-kube-api-access-j8flk\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:28 crc kubenswrapper[4701]: I1121 19:21:28.630359 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d5b0816b-f10a-4e0a-86bc-3f0eda5253a6" (UID: "d5b0816b-f10a-4e0a-86bc-3f0eda5253a6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:21:28 crc kubenswrapper[4701]: I1121 19:21:28.653793 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-config-data" (OuterVolumeSpecName: "config-data") pod "d5b0816b-f10a-4e0a-86bc-3f0eda5253a6" (UID: "d5b0816b-f10a-4e0a-86bc-3f0eda5253a6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:21:28 crc kubenswrapper[4701]: I1121 19:21:28.706931 4701 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Nov 21 19:21:28 crc kubenswrapper[4701]: I1121 19:21:28.728397 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "d5b0816b-f10a-4e0a-86bc-3f0eda5253a6" (UID: "d5b0816b-f10a-4e0a-86bc-3f0eda5253a6"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:21:28 crc kubenswrapper[4701]: I1121 19:21:28.728704 4701 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:28 crc kubenswrapper[4701]: I1121 19:21:28.728732 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:28 crc kubenswrapper[4701]: I1121 19:21:28.728744 4701 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:28 crc kubenswrapper[4701]: I1121 19:21:28.728754 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.318007 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d5b0816b-f10a-4e0a-86bc-3f0eda5253a6","Type":"ContainerDied","Data":"cfc39a83c45eb7d4928203c37a9f852116aa6eabd6a1c0f1fb48b098cab6484d"} Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.318772 4701 scope.go:117] "RemoveContainer" containerID="e5dd62460d381b928eb7d0e39d1a32367e929fc7e8a19f0e97ec53f0e28f9a3e" Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.319026 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.336398 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b405de0f-6523-4d69-b8a7-a73528f0df37","Type":"ContainerStarted","Data":"06c31932b626c602e70b741fd977588d987d2d2cfcf01c1689210b5f67878f2b"} Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.336799 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b405de0f-6523-4d69-b8a7-a73528f0df37","Type":"ContainerStarted","Data":"e1a4ef2d0588cfc0461baafc1139b58103182e9a1bc8e76576d4ac24f14eae3d"} Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.340312 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-decision-engine-0" podUID="8a7a5be4-96a4-4574-9839-2d0576595305" containerName="watcher-decision-engine" containerID="cri-o://4cdc4428ba39945a4fd78e922276c6bc963cbf50eda5e013f6a2e1db189e1833" gracePeriod=30 Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.340824 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"936744f0-7e7e-46bc-8534-29b07d74fd07","Type":"ContainerStarted","Data":"80239202b92151898eded8c5347f6cfbbf610ac1e379f29901ab306b9f3aba3f"} Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.442319 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.451537 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.479487 4701 scope.go:117] "RemoveContainer" 
containerID="d51ba6844549ff4e635dc6a826803800a634850d1c97119683ae52f8c6de2946" Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.482452 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 19:21:29 crc kubenswrapper[4701]: E1121 19:21:29.483728 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5b0816b-f10a-4e0a-86bc-3f0eda5253a6" containerName="glance-httpd" Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.483755 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5b0816b-f10a-4e0a-86bc-3f0eda5253a6" containerName="glance-httpd" Nov 21 19:21:29 crc kubenswrapper[4701]: E1121 19:21:29.483775 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5b0816b-f10a-4e0a-86bc-3f0eda5253a6" containerName="glance-log" Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.483782 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5b0816b-f10a-4e0a-86bc-3f0eda5253a6" containerName="glance-log" Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.483988 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5b0816b-f10a-4e0a-86bc-3f0eda5253a6" containerName="glance-log" Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.484019 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5b0816b-f10a-4e0a-86bc-3f0eda5253a6" containerName="glance-httpd" Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.486174 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.492379 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.495225 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.522551 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.559760 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/50767206-6e50-48ab-ab5f-2eee90151470-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"50767206-6e50-48ab-ab5f-2eee90151470\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.559908 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/50767206-6e50-48ab-ab5f-2eee90151470-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"50767206-6e50-48ab-ab5f-2eee90151470\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.560216 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fk5hk\" (UniqueName: \"kubernetes.io/projected/50767206-6e50-48ab-ab5f-2eee90151470-kube-api-access-fk5hk\") pod \"glance-default-internal-api-0\" (UID: \"50767206-6e50-48ab-ab5f-2eee90151470\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.560310 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" 
(UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"50767206-6e50-48ab-ab5f-2eee90151470\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.560366 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50767206-6e50-48ab-ab5f-2eee90151470-config-data\") pod \"glance-default-internal-api-0\" (UID: \"50767206-6e50-48ab-ab5f-2eee90151470\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.560409 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50767206-6e50-48ab-ab5f-2eee90151470-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"50767206-6e50-48ab-ab5f-2eee90151470\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.560447 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/50767206-6e50-48ab-ab5f-2eee90151470-logs\") pod \"glance-default-internal-api-0\" (UID: \"50767206-6e50-48ab-ab5f-2eee90151470\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.560470 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/50767206-6e50-48ab-ab5f-2eee90151470-scripts\") pod \"glance-default-internal-api-0\" (UID: \"50767206-6e50-48ab-ab5f-2eee90151470\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.663529 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50767206-6e50-48ab-ab5f-2eee90151470-config-data\") pod \"glance-default-internal-api-0\" (UID: \"50767206-6e50-48ab-ab5f-2eee90151470\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.663605 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50767206-6e50-48ab-ab5f-2eee90151470-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"50767206-6e50-48ab-ab5f-2eee90151470\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.663642 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/50767206-6e50-48ab-ab5f-2eee90151470-logs\") pod \"glance-default-internal-api-0\" (UID: \"50767206-6e50-48ab-ab5f-2eee90151470\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.663661 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/50767206-6e50-48ab-ab5f-2eee90151470-scripts\") pod \"glance-default-internal-api-0\" (UID: \"50767206-6e50-48ab-ab5f-2eee90151470\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.663695 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: 
\"kubernetes.io/empty-dir/50767206-6e50-48ab-ab5f-2eee90151470-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"50767206-6e50-48ab-ab5f-2eee90151470\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.663781 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/50767206-6e50-48ab-ab5f-2eee90151470-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"50767206-6e50-48ab-ab5f-2eee90151470\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.663817 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fk5hk\" (UniqueName: \"kubernetes.io/projected/50767206-6e50-48ab-ab5f-2eee90151470-kube-api-access-fk5hk\") pod \"glance-default-internal-api-0\" (UID: \"50767206-6e50-48ab-ab5f-2eee90151470\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.663842 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"50767206-6e50-48ab-ab5f-2eee90151470\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.664322 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/50767206-6e50-48ab-ab5f-2eee90151470-logs\") pod \"glance-default-internal-api-0\" (UID: \"50767206-6e50-48ab-ab5f-2eee90151470\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.664359 4701 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"50767206-6e50-48ab-ab5f-2eee90151470\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/glance-default-internal-api-0" Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.665750 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/50767206-6e50-48ab-ab5f-2eee90151470-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"50767206-6e50-48ab-ab5f-2eee90151470\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.671038 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/50767206-6e50-48ab-ab5f-2eee90151470-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"50767206-6e50-48ab-ab5f-2eee90151470\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.671950 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50767206-6e50-48ab-ab5f-2eee90151470-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"50767206-6e50-48ab-ab5f-2eee90151470\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.672348 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50767206-6e50-48ab-ab5f-2eee90151470-config-data\") pod 
\"glance-default-internal-api-0\" (UID: \"50767206-6e50-48ab-ab5f-2eee90151470\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.672434 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/50767206-6e50-48ab-ab5f-2eee90151470-scripts\") pod \"glance-default-internal-api-0\" (UID: \"50767206-6e50-48ab-ab5f-2eee90151470\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.692091 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fk5hk\" (UniqueName: \"kubernetes.io/projected/50767206-6e50-48ab-ab5f-2eee90151470-kube-api-access-fk5hk\") pod \"glance-default-internal-api-0\" (UID: \"50767206-6e50-48ab-ab5f-2eee90151470\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.721443 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"50767206-6e50-48ab-ab5f-2eee90151470\") " pod="openstack/glance-default-internal-api-0" Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.878531 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 21 19:21:29 crc kubenswrapper[4701]: I1121 19:21:29.976724 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d5b0816b-f10a-4e0a-86bc-3f0eda5253a6" path="/var/lib/kubelet/pods/d5b0816b-f10a-4e0a-86bc-3f0eda5253a6/volumes" Nov 21 19:21:30 crc kubenswrapper[4701]: I1121 19:21:30.362306 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b405de0f-6523-4d69-b8a7-a73528f0df37","Type":"ContainerStarted","Data":"43e10d32e2e772b08f93d84b8ce4ff3dc5d2cd02d96e71b23c384734e883aefd"} Nov 21 19:21:30 crc kubenswrapper[4701]: I1121 19:21:30.366212 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"936744f0-7e7e-46bc-8534-29b07d74fd07","Type":"ContainerStarted","Data":"d43d68c93e7472867cb5791d4812d2d6e96ea0c75aebc9193792cacae18297c4"} Nov 21 19:21:30 crc kubenswrapper[4701]: I1121 19:21:30.366439 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="936744f0-7e7e-46bc-8534-29b07d74fd07" containerName="sg-core" containerID="cri-o://80239202b92151898eded8c5347f6cfbbf610ac1e379f29901ab306b9f3aba3f" gracePeriod=30 Nov 21 19:21:30 crc kubenswrapper[4701]: I1121 19:21:30.366471 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="936744f0-7e7e-46bc-8534-29b07d74fd07" containerName="proxy-httpd" containerID="cri-o://d43d68c93e7472867cb5791d4812d2d6e96ea0c75aebc9193792cacae18297c4" gracePeriod=30 Nov 21 19:21:30 crc kubenswrapper[4701]: I1121 19:21:30.366474 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 21 19:21:30 crc kubenswrapper[4701]: I1121 19:21:30.366539 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="936744f0-7e7e-46bc-8534-29b07d74fd07" containerName="ceilometer-notification-agent" containerID="cri-o://703c53a0745a6913bcdfd9f1347df0be58d3c05200feea2a2723332cb30e6d66" gracePeriod=30 Nov 21 19:21:30 crc kubenswrapper[4701]: I1121 
19:21:30.366573 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="936744f0-7e7e-46bc-8534-29b07d74fd07" containerName="ceilometer-central-agent" containerID="cri-o://ded90fe47222044d06dc5f3ae3b8baac64d86526b6be22cf5dfee29c54fc4a2e" gracePeriod=30 Nov 21 19:21:30 crc kubenswrapper[4701]: I1121 19:21:30.400889 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.4008717600000002 podStartE2EDuration="3.40087176s" podCreationTimestamp="2025-11-21 19:21:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:21:30.393233755 +0000 UTC m=+1181.178373782" watchObservedRunningTime="2025-11-21 19:21:30.40087176 +0000 UTC m=+1181.186011787" Nov 21 19:21:30 crc kubenswrapper[4701]: I1121 19:21:30.433160 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.909409594 podStartE2EDuration="5.433133685s" podCreationTimestamp="2025-11-21 19:21:25 +0000 UTC" firstStartedPulling="2025-11-21 19:21:26.262700626 +0000 UTC m=+1177.047840653" lastFinishedPulling="2025-11-21 19:21:29.786424717 +0000 UTC m=+1180.571564744" observedRunningTime="2025-11-21 19:21:30.426785574 +0000 UTC m=+1181.211925601" watchObservedRunningTime="2025-11-21 19:21:30.433133685 +0000 UTC m=+1181.218273712" Nov 21 19:21:30 crc kubenswrapper[4701]: I1121 19:21:30.586092 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 19:21:31 crc kubenswrapper[4701]: I1121 19:21:31.383942 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"50767206-6e50-48ab-ab5f-2eee90151470","Type":"ContainerStarted","Data":"7b9e1e920f373d3859431dcf65ec71c57e9be3d1f81fecff75236f3377c29df2"} Nov 21 19:21:31 crc kubenswrapper[4701]: I1121 19:21:31.384787 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"50767206-6e50-48ab-ab5f-2eee90151470","Type":"ContainerStarted","Data":"1e605bf0616169075a515a4f02becbaf749f2323d49a657ccb8dd84a73d328c7"} Nov 21 19:21:31 crc kubenswrapper[4701]: I1121 19:21:31.388839 4701 generic.go:334] "Generic (PLEG): container finished" podID="936744f0-7e7e-46bc-8534-29b07d74fd07" containerID="80239202b92151898eded8c5347f6cfbbf610ac1e379f29901ab306b9f3aba3f" exitCode=2 Nov 21 19:21:31 crc kubenswrapper[4701]: I1121 19:21:31.388870 4701 generic.go:334] "Generic (PLEG): container finished" podID="936744f0-7e7e-46bc-8534-29b07d74fd07" containerID="703c53a0745a6913bcdfd9f1347df0be58d3c05200feea2a2723332cb30e6d66" exitCode=0 Nov 21 19:21:31 crc kubenswrapper[4701]: I1121 19:21:31.390529 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"936744f0-7e7e-46bc-8534-29b07d74fd07","Type":"ContainerDied","Data":"80239202b92151898eded8c5347f6cfbbf610ac1e379f29901ab306b9f3aba3f"} Nov 21 19:21:31 crc kubenswrapper[4701]: I1121 19:21:31.390593 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"936744f0-7e7e-46bc-8534-29b07d74fd07","Type":"ContainerDied","Data":"703c53a0745a6913bcdfd9f1347df0be58d3c05200feea2a2723332cb30e6d66"} Nov 21 19:21:32 crc kubenswrapper[4701]: I1121 19:21:32.404041 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/glance-default-internal-api-0" event={"ID":"50767206-6e50-48ab-ab5f-2eee90151470","Type":"ContainerStarted","Data":"30c6218ad68d546b8b84ef9a8abdf5c0a02cced5e43d71e8f6a16664eb951970"} Nov 21 19:21:32 crc kubenswrapper[4701]: I1121 19:21:32.432915 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.4328874369999998 podStartE2EDuration="3.432887437s" podCreationTimestamp="2025-11-21 19:21:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:21:32.426738763 +0000 UTC m=+1183.211878790" watchObservedRunningTime="2025-11-21 19:21:32.432887437 +0000 UTC m=+1183.218027464" Nov 21 19:21:37 crc kubenswrapper[4701]: I1121 19:21:37.818855 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 21 19:21:37 crc kubenswrapper[4701]: I1121 19:21:37.819654 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 21 19:21:37 crc kubenswrapper[4701]: I1121 19:21:37.852827 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 21 19:21:37 crc kubenswrapper[4701]: I1121 19:21:37.886682 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 21 19:21:38 crc kubenswrapper[4701]: I1121 19:21:38.498969 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 21 19:21:38 crc kubenswrapper[4701]: I1121 19:21:38.499022 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 21 19:21:39 crc kubenswrapper[4701]: I1121 19:21:39.879608 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 21 19:21:39 crc kubenswrapper[4701]: I1121 19:21:39.880252 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 21 19:21:39 crc kubenswrapper[4701]: I1121 19:21:39.925743 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 21 19:21:39 crc kubenswrapper[4701]: I1121 19:21:39.942573 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 21 19:21:40 crc kubenswrapper[4701]: I1121 19:21:40.337313 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 21 19:21:40 crc kubenswrapper[4701]: I1121 19:21:40.352420 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 21 19:21:40 crc kubenswrapper[4701]: I1121 19:21:40.530887 4701 generic.go:334] "Generic (PLEG): container finished" podID="936744f0-7e7e-46bc-8534-29b07d74fd07" containerID="ded90fe47222044d06dc5f3ae3b8baac64d86526b6be22cf5dfee29c54fc4a2e" exitCode=0 Nov 21 19:21:40 crc kubenswrapper[4701]: I1121 19:21:40.530956 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"936744f0-7e7e-46bc-8534-29b07d74fd07","Type":"ContainerDied","Data":"ded90fe47222044d06dc5f3ae3b8baac64d86526b6be22cf5dfee29c54fc4a2e"} Nov 21 19:21:40 crc 
kubenswrapper[4701]: I1121 19:21:40.531675 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 21 19:21:40 crc kubenswrapper[4701]: I1121 19:21:40.531767 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 21 19:21:41 crc kubenswrapper[4701]: I1121 19:21:41.544616 4701 generic.go:334] "Generic (PLEG): container finished" podID="63425ae3-f835-4767-b6dd-f5519aa2b0ae" containerID="3c57170e69e5dee3f6a3e241c55d44b8d8a5b743b6c39b137e0d778e32cf6b3e" exitCode=0 Nov 21 19:21:41 crc kubenswrapper[4701]: I1121 19:21:41.544735 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-wtbqn" event={"ID":"63425ae3-f835-4767-b6dd-f5519aa2b0ae","Type":"ContainerDied","Data":"3c57170e69e5dee3f6a3e241c55d44b8d8a5b743b6c39b137e0d778e32cf6b3e"} Nov 21 19:21:42 crc kubenswrapper[4701]: I1121 19:21:42.392744 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 21 19:21:42 crc kubenswrapper[4701]: I1121 19:21:42.479576 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 21 19:21:43 crc kubenswrapper[4701]: I1121 19:21:43.093700 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-wtbqn" Nov 21 19:21:43 crc kubenswrapper[4701]: I1121 19:21:43.223136 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hc6zt\" (UniqueName: \"kubernetes.io/projected/63425ae3-f835-4767-b6dd-f5519aa2b0ae-kube-api-access-hc6zt\") pod \"63425ae3-f835-4767-b6dd-f5519aa2b0ae\" (UID: \"63425ae3-f835-4767-b6dd-f5519aa2b0ae\") " Nov 21 19:21:43 crc kubenswrapper[4701]: I1121 19:21:43.223267 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63425ae3-f835-4767-b6dd-f5519aa2b0ae-combined-ca-bundle\") pod \"63425ae3-f835-4767-b6dd-f5519aa2b0ae\" (UID: \"63425ae3-f835-4767-b6dd-f5519aa2b0ae\") " Nov 21 19:21:43 crc kubenswrapper[4701]: I1121 19:21:43.223332 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/63425ae3-f835-4767-b6dd-f5519aa2b0ae-scripts\") pod \"63425ae3-f835-4767-b6dd-f5519aa2b0ae\" (UID: \"63425ae3-f835-4767-b6dd-f5519aa2b0ae\") " Nov 21 19:21:43 crc kubenswrapper[4701]: I1121 19:21:43.223530 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63425ae3-f835-4767-b6dd-f5519aa2b0ae-config-data\") pod \"63425ae3-f835-4767-b6dd-f5519aa2b0ae\" (UID: \"63425ae3-f835-4767-b6dd-f5519aa2b0ae\") " Nov 21 19:21:43 crc kubenswrapper[4701]: I1121 19:21:43.230674 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63425ae3-f835-4767-b6dd-f5519aa2b0ae-scripts" (OuterVolumeSpecName: "scripts") pod "63425ae3-f835-4767-b6dd-f5519aa2b0ae" (UID: "63425ae3-f835-4767-b6dd-f5519aa2b0ae"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:21:43 crc kubenswrapper[4701]: I1121 19:21:43.232021 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63425ae3-f835-4767-b6dd-f5519aa2b0ae-kube-api-access-hc6zt" (OuterVolumeSpecName: "kube-api-access-hc6zt") pod "63425ae3-f835-4767-b6dd-f5519aa2b0ae" (UID: "63425ae3-f835-4767-b6dd-f5519aa2b0ae"). InnerVolumeSpecName "kube-api-access-hc6zt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:21:43 crc kubenswrapper[4701]: I1121 19:21:43.267321 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63425ae3-f835-4767-b6dd-f5519aa2b0ae-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "63425ae3-f835-4767-b6dd-f5519aa2b0ae" (UID: "63425ae3-f835-4767-b6dd-f5519aa2b0ae"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:21:43 crc kubenswrapper[4701]: I1121 19:21:43.280695 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63425ae3-f835-4767-b6dd-f5519aa2b0ae-config-data" (OuterVolumeSpecName: "config-data") pod "63425ae3-f835-4767-b6dd-f5519aa2b0ae" (UID: "63425ae3-f835-4767-b6dd-f5519aa2b0ae"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:21:43 crc kubenswrapper[4701]: I1121 19:21:43.325795 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63425ae3-f835-4767-b6dd-f5519aa2b0ae-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:43 crc kubenswrapper[4701]: I1121 19:21:43.325841 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hc6zt\" (UniqueName: \"kubernetes.io/projected/63425ae3-f835-4767-b6dd-f5519aa2b0ae-kube-api-access-hc6zt\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:43 crc kubenswrapper[4701]: I1121 19:21:43.325855 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63425ae3-f835-4767-b6dd-f5519aa2b0ae-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:43 crc kubenswrapper[4701]: I1121 19:21:43.325864 4701 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/63425ae3-f835-4767-b6dd-f5519aa2b0ae-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:43 crc kubenswrapper[4701]: I1121 19:21:43.566816 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-wtbqn" Nov 21 19:21:43 crc kubenswrapper[4701]: I1121 19:21:43.566812 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-wtbqn" event={"ID":"63425ae3-f835-4767-b6dd-f5519aa2b0ae","Type":"ContainerDied","Data":"a39fd64723585193a458c05ac633c2fe08ea842d0af3e2e51a352241312c43ab"} Nov 21 19:21:43 crc kubenswrapper[4701]: I1121 19:21:43.566881 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a39fd64723585193a458c05ac633c2fe08ea842d0af3e2e51a352241312c43ab" Nov 21 19:21:43 crc kubenswrapper[4701]: I1121 19:21:43.678282 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 21 19:21:43 crc kubenswrapper[4701]: E1121 19:21:43.678736 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63425ae3-f835-4767-b6dd-f5519aa2b0ae" containerName="nova-cell0-conductor-db-sync" Nov 21 19:21:43 crc kubenswrapper[4701]: I1121 19:21:43.678754 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="63425ae3-f835-4767-b6dd-f5519aa2b0ae" containerName="nova-cell0-conductor-db-sync" Nov 21 19:21:43 crc kubenswrapper[4701]: I1121 19:21:43.678962 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="63425ae3-f835-4767-b6dd-f5519aa2b0ae" containerName="nova-cell0-conductor-db-sync" Nov 21 19:21:43 crc kubenswrapper[4701]: I1121 19:21:43.679699 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 21 19:21:43 crc kubenswrapper[4701]: I1121 19:21:43.681923 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-mzccp" Nov 21 19:21:43 crc kubenswrapper[4701]: I1121 19:21:43.682362 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 21 19:21:43 crc kubenswrapper[4701]: I1121 19:21:43.691802 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 21 19:21:43 crc kubenswrapper[4701]: I1121 19:21:43.835950 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ff3d334-08d1-49a9-8483-48402c600ec2-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"6ff3d334-08d1-49a9-8483-48402c600ec2\") " pod="openstack/nova-cell0-conductor-0" Nov 21 19:21:43 crc kubenswrapper[4701]: I1121 19:21:43.836407 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t4hpx\" (UniqueName: \"kubernetes.io/projected/6ff3d334-08d1-49a9-8483-48402c600ec2-kube-api-access-t4hpx\") pod \"nova-cell0-conductor-0\" (UID: \"6ff3d334-08d1-49a9-8483-48402c600ec2\") " pod="openstack/nova-cell0-conductor-0" Nov 21 19:21:43 crc kubenswrapper[4701]: I1121 19:21:43.836492 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ff3d334-08d1-49a9-8483-48402c600ec2-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"6ff3d334-08d1-49a9-8483-48402c600ec2\") " pod="openstack/nova-cell0-conductor-0" Nov 21 19:21:43 crc kubenswrapper[4701]: I1121 19:21:43.940010 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ff3d334-08d1-49a9-8483-48402c600ec2-config-data\") pod 
\"nova-cell0-conductor-0\" (UID: \"6ff3d334-08d1-49a9-8483-48402c600ec2\") " pod="openstack/nova-cell0-conductor-0" Nov 21 19:21:43 crc kubenswrapper[4701]: I1121 19:21:43.940221 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ff3d334-08d1-49a9-8483-48402c600ec2-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"6ff3d334-08d1-49a9-8483-48402c600ec2\") " pod="openstack/nova-cell0-conductor-0" Nov 21 19:21:43 crc kubenswrapper[4701]: I1121 19:21:43.940263 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t4hpx\" (UniqueName: \"kubernetes.io/projected/6ff3d334-08d1-49a9-8483-48402c600ec2-kube-api-access-t4hpx\") pod \"nova-cell0-conductor-0\" (UID: \"6ff3d334-08d1-49a9-8483-48402c600ec2\") " pod="openstack/nova-cell0-conductor-0" Nov 21 19:21:43 crc kubenswrapper[4701]: I1121 19:21:43.950124 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ff3d334-08d1-49a9-8483-48402c600ec2-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"6ff3d334-08d1-49a9-8483-48402c600ec2\") " pod="openstack/nova-cell0-conductor-0" Nov 21 19:21:43 crc kubenswrapper[4701]: I1121 19:21:43.955058 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ff3d334-08d1-49a9-8483-48402c600ec2-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"6ff3d334-08d1-49a9-8483-48402c600ec2\") " pod="openstack/nova-cell0-conductor-0" Nov 21 19:21:43 crc kubenswrapper[4701]: I1121 19:21:43.961529 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t4hpx\" (UniqueName: \"kubernetes.io/projected/6ff3d334-08d1-49a9-8483-48402c600ec2-kube-api-access-t4hpx\") pod \"nova-cell0-conductor-0\" (UID: \"6ff3d334-08d1-49a9-8483-48402c600ec2\") " pod="openstack/nova-cell0-conductor-0" Nov 21 19:21:44 crc kubenswrapper[4701]: I1121 19:21:44.059672 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 21 19:21:44 crc kubenswrapper[4701]: W1121 19:21:44.384367 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6ff3d334_08d1_49a9_8483_48402c600ec2.slice/crio-71575402c047338a0b67f9c3cc867ef1fde6fa4edb665be6706064d615c234e6 WatchSource:0}: Error finding container 71575402c047338a0b67f9c3cc867ef1fde6fa4edb665be6706064d615c234e6: Status 404 returned error can't find the container with id 71575402c047338a0b67f9c3cc867ef1fde6fa4edb665be6706064d615c234e6 Nov 21 19:21:44 crc kubenswrapper[4701]: I1121 19:21:44.387331 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 21 19:21:44 crc kubenswrapper[4701]: I1121 19:21:44.580178 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"6ff3d334-08d1-49a9-8483-48402c600ec2","Type":"ContainerStarted","Data":"71575402c047338a0b67f9c3cc867ef1fde6fa4edb665be6706064d615c234e6"} Nov 21 19:21:45 crc kubenswrapper[4701]: I1121 19:21:45.622125 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"6ff3d334-08d1-49a9-8483-48402c600ec2","Type":"ContainerStarted","Data":"fdcd76b8c6ea32408332a805de4429812871b339f8e901e5fcc2cc508ced3129"} Nov 21 19:21:45 crc kubenswrapper[4701]: I1121 19:21:45.622459 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 21 19:21:45 crc kubenswrapper[4701]: I1121 19:21:45.670139 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.670110616 podStartE2EDuration="2.670110616s" podCreationTimestamp="2025-11-21 19:21:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:21:45.660733104 +0000 UTC m=+1196.445873171" watchObservedRunningTime="2025-11-21 19:21:45.670110616 +0000 UTC m=+1196.455250673" Nov 21 19:21:49 crc kubenswrapper[4701]: I1121 19:21:49.101908 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 21 19:21:49 crc kubenswrapper[4701]: I1121 19:21:49.656117 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-h694n"] Nov 21 19:21:49 crc kubenswrapper[4701]: I1121 19:21:49.676099 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-h694n" Nov 21 19:21:49 crc kubenswrapper[4701]: I1121 19:21:49.681853 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 21 19:21:49 crc kubenswrapper[4701]: I1121 19:21:49.697781 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 21 19:21:49 crc kubenswrapper[4701]: I1121 19:21:49.705561 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-h694n"] Nov 21 19:21:49 crc kubenswrapper[4701]: I1121 19:21:49.786648 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 21 19:21:49 crc kubenswrapper[4701]: I1121 19:21:49.789157 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 21 19:21:49 crc kubenswrapper[4701]: I1121 19:21:49.793741 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 21 19:21:49 crc kubenswrapper[4701]: I1121 19:21:49.801190 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 21 19:21:49 crc kubenswrapper[4701]: I1121 19:21:49.809653 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1554914a-f5b4-46aa-90a4-a9c07bdd6e53-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-h694n\" (UID: \"1554914a-f5b4-46aa-90a4-a9c07bdd6e53\") " pod="openstack/nova-cell0-cell-mapping-h694n" Nov 21 19:21:49 crc kubenswrapper[4701]: I1121 19:21:49.809841 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vz7fw\" (UniqueName: \"kubernetes.io/projected/1554914a-f5b4-46aa-90a4-a9c07bdd6e53-kube-api-access-vz7fw\") pod \"nova-cell0-cell-mapping-h694n\" (UID: \"1554914a-f5b4-46aa-90a4-a9c07bdd6e53\") " pod="openstack/nova-cell0-cell-mapping-h694n" Nov 21 19:21:49 crc kubenswrapper[4701]: I1121 19:21:49.809877 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1554914a-f5b4-46aa-90a4-a9c07bdd6e53-config-data\") pod \"nova-cell0-cell-mapping-h694n\" (UID: \"1554914a-f5b4-46aa-90a4-a9c07bdd6e53\") " pod="openstack/nova-cell0-cell-mapping-h694n" Nov 21 19:21:49 crc kubenswrapper[4701]: I1121 19:21:49.809899 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1554914a-f5b4-46aa-90a4-a9c07bdd6e53-scripts\") pod \"nova-cell0-cell-mapping-h694n\" (UID: \"1554914a-f5b4-46aa-90a4-a9c07bdd6e53\") " pod="openstack/nova-cell0-cell-mapping-h694n" Nov 21 19:21:49 crc kubenswrapper[4701]: I1121 19:21:49.913063 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vz7fw\" (UniqueName: \"kubernetes.io/projected/1554914a-f5b4-46aa-90a4-a9c07bdd6e53-kube-api-access-vz7fw\") pod \"nova-cell0-cell-mapping-h694n\" (UID: \"1554914a-f5b4-46aa-90a4-a9c07bdd6e53\") " pod="openstack/nova-cell0-cell-mapping-h694n" Nov 21 19:21:49 crc kubenswrapper[4701]: I1121 19:21:49.913121 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1554914a-f5b4-46aa-90a4-a9c07bdd6e53-config-data\") pod \"nova-cell0-cell-mapping-h694n\" (UID: \"1554914a-f5b4-46aa-90a4-a9c07bdd6e53\") " pod="openstack/nova-cell0-cell-mapping-h694n" Nov 21 19:21:49 crc kubenswrapper[4701]: I1121 19:21:49.913141 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1554914a-f5b4-46aa-90a4-a9c07bdd6e53-scripts\") pod \"nova-cell0-cell-mapping-h694n\" (UID: \"1554914a-f5b4-46aa-90a4-a9c07bdd6e53\") " pod="openstack/nova-cell0-cell-mapping-h694n" Nov 21 19:21:49 crc kubenswrapper[4701]: I1121 19:21:49.913166 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fl99z\" (UniqueName: \"kubernetes.io/projected/b13d184e-6b2c-4f6b-9466-790ba15296cd-kube-api-access-fl99z\") pod \"nova-api-0\" (UID: \"b13d184e-6b2c-4f6b-9466-790ba15296cd\") " pod="openstack/nova-api-0" Nov 21 
19:21:49 crc kubenswrapper[4701]: I1121 19:21:49.913189 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b13d184e-6b2c-4f6b-9466-790ba15296cd-logs\") pod \"nova-api-0\" (UID: \"b13d184e-6b2c-4f6b-9466-790ba15296cd\") " pod="openstack/nova-api-0" Nov 21 19:21:49 crc kubenswrapper[4701]: I1121 19:21:49.913230 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b13d184e-6b2c-4f6b-9466-790ba15296cd-config-data\") pod \"nova-api-0\" (UID: \"b13d184e-6b2c-4f6b-9466-790ba15296cd\") " pod="openstack/nova-api-0" Nov 21 19:21:49 crc kubenswrapper[4701]: I1121 19:21:49.913252 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b13d184e-6b2c-4f6b-9466-790ba15296cd-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b13d184e-6b2c-4f6b-9466-790ba15296cd\") " pod="openstack/nova-api-0" Nov 21 19:21:49 crc kubenswrapper[4701]: I1121 19:21:49.913290 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1554914a-f5b4-46aa-90a4-a9c07bdd6e53-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-h694n\" (UID: \"1554914a-f5b4-46aa-90a4-a9c07bdd6e53\") " pod="openstack/nova-cell0-cell-mapping-h694n" Nov 21 19:21:49 crc kubenswrapper[4701]: I1121 19:21:49.926128 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 21 19:21:49 crc kubenswrapper[4701]: I1121 19:21:49.926163 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 21 19:21:49 crc kubenswrapper[4701]: I1121 19:21:49.926911 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1554914a-f5b4-46aa-90a4-a9c07bdd6e53-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-h694n\" (UID: \"1554914a-f5b4-46aa-90a4-a9c07bdd6e53\") " pod="openstack/nova-cell0-cell-mapping-h694n" Nov 21 19:21:49 crc kubenswrapper[4701]: I1121 19:21:49.935954 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 19:21:49 crc kubenswrapper[4701]: I1121 19:21:49.937537 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 21 19:21:49 crc kubenswrapper[4701]: I1121 19:21:49.942546 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 21 19:21:49 crc kubenswrapper[4701]: I1121 19:21:49.945390 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1554914a-f5b4-46aa-90a4-a9c07bdd6e53-scripts\") pod \"nova-cell0-cell-mapping-h694n\" (UID: \"1554914a-f5b4-46aa-90a4-a9c07bdd6e53\") " pod="openstack/nova-cell0-cell-mapping-h694n" Nov 21 19:21:49 crc kubenswrapper[4701]: I1121 19:21:49.946920 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1554914a-f5b4-46aa-90a4-a9c07bdd6e53-config-data\") pod \"nova-cell0-cell-mapping-h694n\" (UID: \"1554914a-f5b4-46aa-90a4-a9c07bdd6e53\") " pod="openstack/nova-cell0-cell-mapping-h694n" Nov 21 19:21:49 crc kubenswrapper[4701]: I1121 19:21:49.984961 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vz7fw\" (UniqueName: \"kubernetes.io/projected/1554914a-f5b4-46aa-90a4-a9c07bdd6e53-kube-api-access-vz7fw\") pod \"nova-cell0-cell-mapping-h694n\" (UID: \"1554914a-f5b4-46aa-90a4-a9c07bdd6e53\") " pod="openstack/nova-cell0-cell-mapping-h694n" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.027839 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-h694n" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.043397 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.079391 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c7l8q\" (UniqueName: \"kubernetes.io/projected/53f3873a-a20c-4245-a0a3-a34bd8c5b6fe-kube-api-access-c7l8q\") pod \"nova-scheduler-0\" (UID: \"53f3873a-a20c-4245-a0a3-a34bd8c5b6fe\") " pod="openstack/nova-scheduler-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.080808 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53f3873a-a20c-4245-a0a3-a34bd8c5b6fe-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"53f3873a-a20c-4245-a0a3-a34bd8c5b6fe\") " pod="openstack/nova-scheduler-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.081052 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53f3873a-a20c-4245-a0a3-a34bd8c5b6fe-config-data\") pod \"nova-scheduler-0\" (UID: \"53f3873a-a20c-4245-a0a3-a34bd8c5b6fe\") " pod="openstack/nova-scheduler-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.081123 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fl99z\" (UniqueName: \"kubernetes.io/projected/b13d184e-6b2c-4f6b-9466-790ba15296cd-kube-api-access-fl99z\") pod \"nova-api-0\" (UID: \"b13d184e-6b2c-4f6b-9466-790ba15296cd\") " pod="openstack/nova-api-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.081174 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b13d184e-6b2c-4f6b-9466-790ba15296cd-logs\") pod \"nova-api-0\" (UID: \"b13d184e-6b2c-4f6b-9466-790ba15296cd\") " 
pod="openstack/nova-api-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.081245 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b13d184e-6b2c-4f6b-9466-790ba15296cd-config-data\") pod \"nova-api-0\" (UID: \"b13d184e-6b2c-4f6b-9466-790ba15296cd\") " pod="openstack/nova-api-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.081289 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b13d184e-6b2c-4f6b-9466-790ba15296cd-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b13d184e-6b2c-4f6b-9466-790ba15296cd\") " pod="openstack/nova-api-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.083748 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.088719 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b13d184e-6b2c-4f6b-9466-790ba15296cd-logs\") pod \"nova-api-0\" (UID: \"b13d184e-6b2c-4f6b-9466-790ba15296cd\") " pod="openstack/nova-api-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.089052 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.089799 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.089887 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.104408 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.108367 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b13d184e-6b2c-4f6b-9466-790ba15296cd-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b13d184e-6b2c-4f6b-9466-790ba15296cd\") " pod="openstack/nova-api-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.114038 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b13d184e-6b2c-4f6b-9466-790ba15296cd-config-data\") pod \"nova-api-0\" (UID: \"b13d184e-6b2c-4f6b-9466-790ba15296cd\") " pod="openstack/nova-api-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.128914 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.130483 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.142351 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.149260 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.192590 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c7l8q\" (UniqueName: \"kubernetes.io/projected/53f3873a-a20c-4245-a0a3-a34bd8c5b6fe-kube-api-access-c7l8q\") pod \"nova-scheduler-0\" (UID: \"53f3873a-a20c-4245-a0a3-a34bd8c5b6fe\") " pod="openstack/nova-scheduler-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.192635 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53f3873a-a20c-4245-a0a3-a34bd8c5b6fe-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"53f3873a-a20c-4245-a0a3-a34bd8c5b6fe\") " pod="openstack/nova-scheduler-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.192690 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53f3873a-a20c-4245-a0a3-a34bd8c5b6fe-config-data\") pod \"nova-scheduler-0\" (UID: \"53f3873a-a20c-4245-a0a3-a34bd8c5b6fe\") " pod="openstack/nova-scheduler-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.205975 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-66995857cf-dk6r8"] Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.208293 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-66995857cf-dk6r8" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.241423 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-66995857cf-dk6r8"] Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.242042 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53f3873a-a20c-4245-a0a3-a34bd8c5b6fe-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"53f3873a-a20c-4245-a0a3-a34bd8c5b6fe\") " pod="openstack/nova-scheduler-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.263899 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c7l8q\" (UniqueName: \"kubernetes.io/projected/53f3873a-a20c-4245-a0a3-a34bd8c5b6fe-kube-api-access-c7l8q\") pod \"nova-scheduler-0\" (UID: \"53f3873a-a20c-4245-a0a3-a34bd8c5b6fe\") " pod="openstack/nova-scheduler-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.264227 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53f3873a-a20c-4245-a0a3-a34bd8c5b6fe-config-data\") pod \"nova-scheduler-0\" (UID: \"53f3873a-a20c-4245-a0a3-a34bd8c5b6fe\") " pod="openstack/nova-scheduler-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.264965 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fl99z\" (UniqueName: \"kubernetes.io/projected/b13d184e-6b2c-4f6b-9466-790ba15296cd-kube-api-access-fl99z\") pod \"nova-api-0\" (UID: \"b13d184e-6b2c-4f6b-9466-790ba15296cd\") " pod="openstack/nova-api-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.294742 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e9e1aa13-3797-4779-b94f-f39f2e4fcabd-dns-svc\") pod \"dnsmasq-dns-66995857cf-dk6r8\" (UID: \"e9e1aa13-3797-4779-b94f-f39f2e4fcabd\") " pod="openstack/dnsmasq-dns-66995857cf-dk6r8" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.294806 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b2b3795-4721-4695-a55f-4c7c7fd3ba58-config-data\") pod \"nova-metadata-0\" (UID: \"2b2b3795-4721-4695-a55f-4c7c7fd3ba58\") " pod="openstack/nova-metadata-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.294828 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e9e1aa13-3797-4779-b94f-f39f2e4fcabd-ovsdbserver-nb\") pod \"dnsmasq-dns-66995857cf-dk6r8\" (UID: \"e9e1aa13-3797-4779-b94f-f39f2e4fcabd\") " pod="openstack/dnsmasq-dns-66995857cf-dk6r8" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.294979 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-78k5q\" (UniqueName: \"kubernetes.io/projected/2b2b3795-4721-4695-a55f-4c7c7fd3ba58-kube-api-access-78k5q\") pod \"nova-metadata-0\" (UID: \"2b2b3795-4721-4695-a55f-4c7c7fd3ba58\") " pod="openstack/nova-metadata-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.295405 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/983d33ec-2246-4db3-b4d6-54cca3235d73-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: 
\"983d33ec-2246-4db3-b4d6-54cca3235d73\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.295439 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e9e1aa13-3797-4779-b94f-f39f2e4fcabd-dns-swift-storage-0\") pod \"dnsmasq-dns-66995857cf-dk6r8\" (UID: \"e9e1aa13-3797-4779-b94f-f39f2e4fcabd\") " pod="openstack/dnsmasq-dns-66995857cf-dk6r8" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.295488 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2b2b3795-4721-4695-a55f-4c7c7fd3ba58-logs\") pod \"nova-metadata-0\" (UID: \"2b2b3795-4721-4695-a55f-4c7c7fd3ba58\") " pod="openstack/nova-metadata-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.295757 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9e1aa13-3797-4779-b94f-f39f2e4fcabd-config\") pod \"dnsmasq-dns-66995857cf-dk6r8\" (UID: \"e9e1aa13-3797-4779-b94f-f39f2e4fcabd\") " pod="openstack/dnsmasq-dns-66995857cf-dk6r8" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.295811 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b2b3795-4721-4695-a55f-4c7c7fd3ba58-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"2b2b3795-4721-4695-a55f-4c7c7fd3ba58\") " pod="openstack/nova-metadata-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.296142 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/983d33ec-2246-4db3-b4d6-54cca3235d73-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"983d33ec-2246-4db3-b4d6-54cca3235d73\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.296294 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2s7zg\" (UniqueName: \"kubernetes.io/projected/e9e1aa13-3797-4779-b94f-f39f2e4fcabd-kube-api-access-2s7zg\") pod \"dnsmasq-dns-66995857cf-dk6r8\" (UID: \"e9e1aa13-3797-4779-b94f-f39f2e4fcabd\") " pod="openstack/dnsmasq-dns-66995857cf-dk6r8" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.296412 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n9k8c\" (UniqueName: \"kubernetes.io/projected/983d33ec-2246-4db3-b4d6-54cca3235d73-kube-api-access-n9k8c\") pod \"nova-cell1-novncproxy-0\" (UID: \"983d33ec-2246-4db3-b4d6-54cca3235d73\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.296483 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e9e1aa13-3797-4779-b94f-f39f2e4fcabd-ovsdbserver-sb\") pod \"dnsmasq-dns-66995857cf-dk6r8\" (UID: \"e9e1aa13-3797-4779-b94f-f39f2e4fcabd\") " pod="openstack/dnsmasq-dns-66995857cf-dk6r8" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.364009 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.398925 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e9e1aa13-3797-4779-b94f-f39f2e4fcabd-dns-svc\") pod \"dnsmasq-dns-66995857cf-dk6r8\" (UID: \"e9e1aa13-3797-4779-b94f-f39f2e4fcabd\") " pod="openstack/dnsmasq-dns-66995857cf-dk6r8" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.399006 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b2b3795-4721-4695-a55f-4c7c7fd3ba58-config-data\") pod \"nova-metadata-0\" (UID: \"2b2b3795-4721-4695-a55f-4c7c7fd3ba58\") " pod="openstack/nova-metadata-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.399035 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e9e1aa13-3797-4779-b94f-f39f2e4fcabd-ovsdbserver-nb\") pod \"dnsmasq-dns-66995857cf-dk6r8\" (UID: \"e9e1aa13-3797-4779-b94f-f39f2e4fcabd\") " pod="openstack/dnsmasq-dns-66995857cf-dk6r8" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.399060 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-78k5q\" (UniqueName: \"kubernetes.io/projected/2b2b3795-4721-4695-a55f-4c7c7fd3ba58-kube-api-access-78k5q\") pod \"nova-metadata-0\" (UID: \"2b2b3795-4721-4695-a55f-4c7c7fd3ba58\") " pod="openstack/nova-metadata-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.399149 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e9e1aa13-3797-4779-b94f-f39f2e4fcabd-dns-swift-storage-0\") pod \"dnsmasq-dns-66995857cf-dk6r8\" (UID: \"e9e1aa13-3797-4779-b94f-f39f2e4fcabd\") " pod="openstack/dnsmasq-dns-66995857cf-dk6r8" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.399172 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/983d33ec-2246-4db3-b4d6-54cca3235d73-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"983d33ec-2246-4db3-b4d6-54cca3235d73\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.399193 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2b2b3795-4721-4695-a55f-4c7c7fd3ba58-logs\") pod \"nova-metadata-0\" (UID: \"2b2b3795-4721-4695-a55f-4c7c7fd3ba58\") " pod="openstack/nova-metadata-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.399288 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9e1aa13-3797-4779-b94f-f39f2e4fcabd-config\") pod \"dnsmasq-dns-66995857cf-dk6r8\" (UID: \"e9e1aa13-3797-4779-b94f-f39f2e4fcabd\") " pod="openstack/dnsmasq-dns-66995857cf-dk6r8" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.399315 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b2b3795-4721-4695-a55f-4c7c7fd3ba58-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"2b2b3795-4721-4695-a55f-4c7c7fd3ba58\") " pod="openstack/nova-metadata-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.399338 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/983d33ec-2246-4db3-b4d6-54cca3235d73-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"983d33ec-2246-4db3-b4d6-54cca3235d73\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.399371 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2s7zg\" (UniqueName: \"kubernetes.io/projected/e9e1aa13-3797-4779-b94f-f39f2e4fcabd-kube-api-access-2s7zg\") pod \"dnsmasq-dns-66995857cf-dk6r8\" (UID: \"e9e1aa13-3797-4779-b94f-f39f2e4fcabd\") " pod="openstack/dnsmasq-dns-66995857cf-dk6r8" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.399408 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n9k8c\" (UniqueName: \"kubernetes.io/projected/983d33ec-2246-4db3-b4d6-54cca3235d73-kube-api-access-n9k8c\") pod \"nova-cell1-novncproxy-0\" (UID: \"983d33ec-2246-4db3-b4d6-54cca3235d73\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.399451 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e9e1aa13-3797-4779-b94f-f39f2e4fcabd-ovsdbserver-sb\") pod \"dnsmasq-dns-66995857cf-dk6r8\" (UID: \"e9e1aa13-3797-4779-b94f-f39f2e4fcabd\") " pod="openstack/dnsmasq-dns-66995857cf-dk6r8" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.401958 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e9e1aa13-3797-4779-b94f-f39f2e4fcabd-ovsdbserver-sb\") pod \"dnsmasq-dns-66995857cf-dk6r8\" (UID: \"e9e1aa13-3797-4779-b94f-f39f2e4fcabd\") " pod="openstack/dnsmasq-dns-66995857cf-dk6r8" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.402383 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2b2b3795-4721-4695-a55f-4c7c7fd3ba58-logs\") pod \"nova-metadata-0\" (UID: \"2b2b3795-4721-4695-a55f-4c7c7fd3ba58\") " pod="openstack/nova-metadata-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.403266 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9e1aa13-3797-4779-b94f-f39f2e4fcabd-config\") pod \"dnsmasq-dns-66995857cf-dk6r8\" (UID: \"e9e1aa13-3797-4779-b94f-f39f2e4fcabd\") " pod="openstack/dnsmasq-dns-66995857cf-dk6r8" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.403462 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e9e1aa13-3797-4779-b94f-f39f2e4fcabd-ovsdbserver-nb\") pod \"dnsmasq-dns-66995857cf-dk6r8\" (UID: \"e9e1aa13-3797-4779-b94f-f39f2e4fcabd\") " pod="openstack/dnsmasq-dns-66995857cf-dk6r8" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.404038 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e9e1aa13-3797-4779-b94f-f39f2e4fcabd-dns-svc\") pod \"dnsmasq-dns-66995857cf-dk6r8\" (UID: \"e9e1aa13-3797-4779-b94f-f39f2e4fcabd\") " pod="openstack/dnsmasq-dns-66995857cf-dk6r8" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.407811 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e9e1aa13-3797-4779-b94f-f39f2e4fcabd-dns-swift-storage-0\") pod \"dnsmasq-dns-66995857cf-dk6r8\" 
(UID: \"e9e1aa13-3797-4779-b94f-f39f2e4fcabd\") " pod="openstack/dnsmasq-dns-66995857cf-dk6r8" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.408844 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b2b3795-4721-4695-a55f-4c7c7fd3ba58-config-data\") pod \"nova-metadata-0\" (UID: \"2b2b3795-4721-4695-a55f-4c7c7fd3ba58\") " pod="openstack/nova-metadata-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.409325 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b2b3795-4721-4695-a55f-4c7c7fd3ba58-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"2b2b3795-4721-4695-a55f-4c7c7fd3ba58\") " pod="openstack/nova-metadata-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.410235 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/983d33ec-2246-4db3-b4d6-54cca3235d73-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"983d33ec-2246-4db3-b4d6-54cca3235d73\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.413922 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/983d33ec-2246-4db3-b4d6-54cca3235d73-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"983d33ec-2246-4db3-b4d6-54cca3235d73\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.423628 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.431288 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-78k5q\" (UniqueName: \"kubernetes.io/projected/2b2b3795-4721-4695-a55f-4c7c7fd3ba58-kube-api-access-78k5q\") pod \"nova-metadata-0\" (UID: \"2b2b3795-4721-4695-a55f-4c7c7fd3ba58\") " pod="openstack/nova-metadata-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.432914 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n9k8c\" (UniqueName: \"kubernetes.io/projected/983d33ec-2246-4db3-b4d6-54cca3235d73-kube-api-access-n9k8c\") pod \"nova-cell1-novncproxy-0\" (UID: \"983d33ec-2246-4db3-b4d6-54cca3235d73\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.450056 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2s7zg\" (UniqueName: \"kubernetes.io/projected/e9e1aa13-3797-4779-b94f-f39f2e4fcabd-kube-api-access-2s7zg\") pod \"dnsmasq-dns-66995857cf-dk6r8\" (UID: \"e9e1aa13-3797-4779-b94f-f39f2e4fcabd\") " pod="openstack/dnsmasq-dns-66995857cf-dk6r8" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.618946 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.646144 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.664276 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-66995857cf-dk6r8" Nov 21 19:21:50 crc kubenswrapper[4701]: I1121 19:21:50.910128 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-h694n"] Nov 21 19:21:51 crc kubenswrapper[4701]: I1121 19:21:51.140053 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 19:21:51 crc kubenswrapper[4701]: I1121 19:21:51.380686 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-r4vt6"] Nov 21 19:21:51 crc kubenswrapper[4701]: I1121 19:21:51.382392 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-r4vt6" Nov 21 19:21:51 crc kubenswrapper[4701]: I1121 19:21:51.388257 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 21 19:21:51 crc kubenswrapper[4701]: I1121 19:21:51.388392 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 21 19:21:51 crc kubenswrapper[4701]: I1121 19:21:51.412799 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-r4vt6"] Nov 21 19:21:51 crc kubenswrapper[4701]: I1121 19:21:51.433761 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 21 19:21:51 crc kubenswrapper[4701]: I1121 19:21:51.548351 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4933c0b9-8f15-4b88-90ea-7fb26f2f4d66-scripts\") pod \"nova-cell1-conductor-db-sync-r4vt6\" (UID: \"4933c0b9-8f15-4b88-90ea-7fb26f2f4d66\") " pod="openstack/nova-cell1-conductor-db-sync-r4vt6" Nov 21 19:21:51 crc kubenswrapper[4701]: I1121 19:21:51.548427 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4933c0b9-8f15-4b88-90ea-7fb26f2f4d66-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-r4vt6\" (UID: \"4933c0b9-8f15-4b88-90ea-7fb26f2f4d66\") " pod="openstack/nova-cell1-conductor-db-sync-r4vt6" Nov 21 19:21:51 crc kubenswrapper[4701]: I1121 19:21:51.548487 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ns9ql\" (UniqueName: \"kubernetes.io/projected/4933c0b9-8f15-4b88-90ea-7fb26f2f4d66-kube-api-access-ns9ql\") pod \"nova-cell1-conductor-db-sync-r4vt6\" (UID: \"4933c0b9-8f15-4b88-90ea-7fb26f2f4d66\") " pod="openstack/nova-cell1-conductor-db-sync-r4vt6" Nov 21 19:21:51 crc kubenswrapper[4701]: I1121 19:21:51.548514 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4933c0b9-8f15-4b88-90ea-7fb26f2f4d66-config-data\") pod \"nova-cell1-conductor-db-sync-r4vt6\" (UID: \"4933c0b9-8f15-4b88-90ea-7fb26f2f4d66\") " pod="openstack/nova-cell1-conductor-db-sync-r4vt6" Nov 21 19:21:51 crc kubenswrapper[4701]: I1121 19:21:51.651330 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ns9ql\" (UniqueName: \"kubernetes.io/projected/4933c0b9-8f15-4b88-90ea-7fb26f2f4d66-kube-api-access-ns9ql\") pod \"nova-cell1-conductor-db-sync-r4vt6\" (UID: \"4933c0b9-8f15-4b88-90ea-7fb26f2f4d66\") " pod="openstack/nova-cell1-conductor-db-sync-r4vt6" Nov 21 19:21:51 crc kubenswrapper[4701]: I1121 
19:21:51.651414 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4933c0b9-8f15-4b88-90ea-7fb26f2f4d66-config-data\") pod \"nova-cell1-conductor-db-sync-r4vt6\" (UID: \"4933c0b9-8f15-4b88-90ea-7fb26f2f4d66\") " pod="openstack/nova-cell1-conductor-db-sync-r4vt6" Nov 21 19:21:51 crc kubenswrapper[4701]: I1121 19:21:51.651586 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4933c0b9-8f15-4b88-90ea-7fb26f2f4d66-scripts\") pod \"nova-cell1-conductor-db-sync-r4vt6\" (UID: \"4933c0b9-8f15-4b88-90ea-7fb26f2f4d66\") " pod="openstack/nova-cell1-conductor-db-sync-r4vt6" Nov 21 19:21:51 crc kubenswrapper[4701]: I1121 19:21:51.651633 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4933c0b9-8f15-4b88-90ea-7fb26f2f4d66-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-r4vt6\" (UID: \"4933c0b9-8f15-4b88-90ea-7fb26f2f4d66\") " pod="openstack/nova-cell1-conductor-db-sync-r4vt6" Nov 21 19:21:51 crc kubenswrapper[4701]: I1121 19:21:51.671701 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4933c0b9-8f15-4b88-90ea-7fb26f2f4d66-config-data\") pod \"nova-cell1-conductor-db-sync-r4vt6\" (UID: \"4933c0b9-8f15-4b88-90ea-7fb26f2f4d66\") " pod="openstack/nova-cell1-conductor-db-sync-r4vt6" Nov 21 19:21:51 crc kubenswrapper[4701]: I1121 19:21:51.671742 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4933c0b9-8f15-4b88-90ea-7fb26f2f4d66-scripts\") pod \"nova-cell1-conductor-db-sync-r4vt6\" (UID: \"4933c0b9-8f15-4b88-90ea-7fb26f2f4d66\") " pod="openstack/nova-cell1-conductor-db-sync-r4vt6" Nov 21 19:21:51 crc kubenswrapper[4701]: I1121 19:21:51.671997 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4933c0b9-8f15-4b88-90ea-7fb26f2f4d66-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-r4vt6\" (UID: \"4933c0b9-8f15-4b88-90ea-7fb26f2f4d66\") " pod="openstack/nova-cell1-conductor-db-sync-r4vt6" Nov 21 19:21:51 crc kubenswrapper[4701]: I1121 19:21:51.674785 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ns9ql\" (UniqueName: \"kubernetes.io/projected/4933c0b9-8f15-4b88-90ea-7fb26f2f4d66-kube-api-access-ns9ql\") pod \"nova-cell1-conductor-db-sync-r4vt6\" (UID: \"4933c0b9-8f15-4b88-90ea-7fb26f2f4d66\") " pod="openstack/nova-cell1-conductor-db-sync-r4vt6" Nov 21 19:21:51 crc kubenswrapper[4701]: I1121 19:21:51.731457 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b13d184e-6b2c-4f6b-9466-790ba15296cd","Type":"ContainerStarted","Data":"34c7ac4b46625a198068fd3151f743ea2966c7595b203bcf229afb1b83134502"} Nov 21 19:21:51 crc kubenswrapper[4701]: I1121 19:21:51.733793 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-r4vt6" Nov 21 19:21:51 crc kubenswrapper[4701]: I1121 19:21:51.734307 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-h694n" event={"ID":"1554914a-f5b4-46aa-90a4-a9c07bdd6e53","Type":"ContainerStarted","Data":"e6418b9dae36b0f9223e04f84281e520b64a115451b632c0efd4cbc6f2b8aaef"} Nov 21 19:21:51 crc kubenswrapper[4701]: I1121 19:21:51.734355 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-h694n" event={"ID":"1554914a-f5b4-46aa-90a4-a9c07bdd6e53","Type":"ContainerStarted","Data":"8d98016474dd6b260b91188ec296318fb50fd8d130bc53fcc67314829a3e92ba"} Nov 21 19:21:51 crc kubenswrapper[4701]: I1121 19:21:51.743054 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"53f3873a-a20c-4245-a0a3-a34bd8c5b6fe","Type":"ContainerStarted","Data":"d6c88c698b228133aefc7176793bb0152c87548204bb712a924e5d3c1133af64"} Nov 21 19:21:51 crc kubenswrapper[4701]: I1121 19:21:51.762243 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-h694n" podStartSLOduration=2.762159512 podStartE2EDuration="2.762159512s" podCreationTimestamp="2025-11-21 19:21:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:21:51.760351855 +0000 UTC m=+1202.545491882" watchObservedRunningTime="2025-11-21 19:21:51.762159512 +0000 UTC m=+1202.547299529" Nov 21 19:21:51 crc kubenswrapper[4701]: I1121 19:21:51.793321 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-66995857cf-dk6r8"] Nov 21 19:21:51 crc kubenswrapper[4701]: W1121 19:21:51.801043 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode9e1aa13_3797_4779_b94f_f39f2e4fcabd.slice/crio-bea87ad9b77312423857e41341f7e42d7b7fcf6f7326feef9db9f128c53e4bb5 WatchSource:0}: Error finding container bea87ad9b77312423857e41341f7e42d7b7fcf6f7326feef9db9f128c53e4bb5: Status 404 returned error can't find the container with id bea87ad9b77312423857e41341f7e42d7b7fcf6f7326feef9db9f128c53e4bb5 Nov 21 19:21:51 crc kubenswrapper[4701]: I1121 19:21:51.851019 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 21 19:21:51 crc kubenswrapper[4701]: I1121 19:21:51.893051 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 19:21:51 crc kubenswrapper[4701]: W1121 19:21:51.898030 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2b2b3795_4721_4695_a55f_4c7c7fd3ba58.slice/crio-a0daf832f3a8e5389b41b7ab013caaccd02aff6abc933899a1e57dee430e8695 WatchSource:0}: Error finding container a0daf832f3a8e5389b41b7ab013caaccd02aff6abc933899a1e57dee430e8695: Status 404 returned error can't find the container with id a0daf832f3a8e5389b41b7ab013caaccd02aff6abc933899a1e57dee430e8695 Nov 21 19:21:52 crc kubenswrapper[4701]: I1121 19:21:52.442575 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-r4vt6"] Nov 21 19:21:52 crc kubenswrapper[4701]: I1121 19:21:52.763417 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" 
event={"ID":"983d33ec-2246-4db3-b4d6-54cca3235d73","Type":"ContainerStarted","Data":"6c2332d37dda340665afd0fc5af4702b020701c6cfca11b67251bbd2c12dfe63"} Nov 21 19:21:52 crc kubenswrapper[4701]: I1121 19:21:52.766192 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2b2b3795-4721-4695-a55f-4c7c7fd3ba58","Type":"ContainerStarted","Data":"a0daf832f3a8e5389b41b7ab013caaccd02aff6abc933899a1e57dee430e8695"} Nov 21 19:21:52 crc kubenswrapper[4701]: I1121 19:21:52.770069 4701 generic.go:334] "Generic (PLEG): container finished" podID="e9e1aa13-3797-4779-b94f-f39f2e4fcabd" containerID="96bc60dfeec492a374d47d3ec95cdf5a8880ba575c2f238544353e0117262340" exitCode=0 Nov 21 19:21:52 crc kubenswrapper[4701]: I1121 19:21:52.771438 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66995857cf-dk6r8" event={"ID":"e9e1aa13-3797-4779-b94f-f39f2e4fcabd","Type":"ContainerDied","Data":"96bc60dfeec492a374d47d3ec95cdf5a8880ba575c2f238544353e0117262340"} Nov 21 19:21:52 crc kubenswrapper[4701]: I1121 19:21:52.771466 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66995857cf-dk6r8" event={"ID":"e9e1aa13-3797-4779-b94f-f39f2e4fcabd","Type":"ContainerStarted","Data":"bea87ad9b77312423857e41341f7e42d7b7fcf6f7326feef9db9f128c53e4bb5"} Nov 21 19:21:53 crc kubenswrapper[4701]: I1121 19:21:53.791181 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-r4vt6" event={"ID":"4933c0b9-8f15-4b88-90ea-7fb26f2f4d66","Type":"ContainerStarted","Data":"de8c687b42947b453221bfe75f93a3960e55339e4562dccd90d92f82b517b930"} Nov 21 19:21:54 crc kubenswrapper[4701]: I1121 19:21:54.635355 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 19:21:54 crc kubenswrapper[4701]: I1121 19:21:54.645905 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 21 19:21:55 crc kubenswrapper[4701]: I1121 19:21:55.678803 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="936744f0-7e7e-46bc-8534-29b07d74fd07" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 21 19:21:56 crc kubenswrapper[4701]: E1121 19:21:56.490562 4701 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4cdc4428ba39945a4fd78e922276c6bc963cbf50eda5e013f6a2e1db189e1833" cmd=["/usr/bin/pgrep","-f","-r","DRST","watcher-decision-engine"] Nov 21 19:21:56 crc kubenswrapper[4701]: E1121 19:21:56.498650 4701 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4cdc4428ba39945a4fd78e922276c6bc963cbf50eda5e013f6a2e1db189e1833" cmd=["/usr/bin/pgrep","-f","-r","DRST","watcher-decision-engine"] Nov 21 19:21:56 crc kubenswrapper[4701]: E1121 19:21:56.503631 4701 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4cdc4428ba39945a4fd78e922276c6bc963cbf50eda5e013f6a2e1db189e1833" cmd=["/usr/bin/pgrep","-f","-r","DRST","watcher-decision-engine"] Nov 21 19:21:56 crc kubenswrapper[4701]: E1121 19:21:56.503722 4701 
prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/watcher-decision-engine-0" podUID="8a7a5be4-96a4-4574-9839-2d0576595305" containerName="watcher-decision-engine" Nov 21 19:21:56 crc kubenswrapper[4701]: I1121 19:21:56.837047 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"53f3873a-a20c-4245-a0a3-a34bd8c5b6fe","Type":"ContainerStarted","Data":"22a14fc572829240174fd2695bfe2ce8e04d886a966dda8f14aab533687bb778"} Nov 21 19:21:56 crc kubenswrapper[4701]: I1121 19:21:56.839891 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b13d184e-6b2c-4f6b-9466-790ba15296cd","Type":"ContainerStarted","Data":"8d55fcdda35709a32f611036657f5e9d553b74a0fc3a035fe38d06e88ba7c898"} Nov 21 19:21:56 crc kubenswrapper[4701]: I1121 19:21:56.839971 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b13d184e-6b2c-4f6b-9466-790ba15296cd","Type":"ContainerStarted","Data":"d1d7b61877243190a828bc5214a38c3857e62eb6f56e259f8ac738cc1cecc640"} Nov 21 19:21:56 crc kubenswrapper[4701]: I1121 19:21:56.842136 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-r4vt6" event={"ID":"4933c0b9-8f15-4b88-90ea-7fb26f2f4d66","Type":"ContainerStarted","Data":"5ba58a1d76cee00b4c33d1041d44aefbe1a1a858ab8ef46e4371df68016be7cc"} Nov 21 19:21:56 crc kubenswrapper[4701]: I1121 19:21:56.844500 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2b2b3795-4721-4695-a55f-4c7c7fd3ba58","Type":"ContainerStarted","Data":"0d237facb19f08d53b83e815826582dd2b9bcb859dd241933092699facaf3d73"} Nov 21 19:21:56 crc kubenswrapper[4701]: I1121 19:21:56.844538 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2b2b3795-4721-4695-a55f-4c7c7fd3ba58","Type":"ContainerStarted","Data":"ccf9f6759af4ae58c9dcf4e9f5efe82b8b48ee470f8bc191e1fa4fe30d7a1018"} Nov 21 19:21:56 crc kubenswrapper[4701]: I1121 19:21:56.844663 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="2b2b3795-4721-4695-a55f-4c7c7fd3ba58" containerName="nova-metadata-log" containerID="cri-o://ccf9f6759af4ae58c9dcf4e9f5efe82b8b48ee470f8bc191e1fa4fe30d7a1018" gracePeriod=30 Nov 21 19:21:56 crc kubenswrapper[4701]: I1121 19:21:56.844959 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="2b2b3795-4721-4695-a55f-4c7c7fd3ba58" containerName="nova-metadata-metadata" containerID="cri-o://0d237facb19f08d53b83e815826582dd2b9bcb859dd241933092699facaf3d73" gracePeriod=30 Nov 21 19:21:56 crc kubenswrapper[4701]: I1121 19:21:56.847403 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66995857cf-dk6r8" event={"ID":"e9e1aa13-3797-4779-b94f-f39f2e4fcabd","Type":"ContainerStarted","Data":"9ededf9eb8775b1266dfe37291abc07e8343805f8fd852f9de840631df2dfd88"} Nov 21 19:21:56 crc kubenswrapper[4701]: I1121 19:21:56.847532 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-66995857cf-dk6r8" Nov 21 19:21:56 crc kubenswrapper[4701]: I1121 19:21:56.859218 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" 
event={"ID":"983d33ec-2246-4db3-b4d6-54cca3235d73","Type":"ContainerStarted","Data":"bf340e072671d82328b957df1c39479c00b9cd5998634d839e725ca1fdfa0615"} Nov 21 19:21:56 crc kubenswrapper[4701]: I1121 19:21:56.859356 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="983d33ec-2246-4db3-b4d6-54cca3235d73" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://bf340e072671d82328b957df1c39479c00b9cd5998634d839e725ca1fdfa0615" gracePeriod=30 Nov 21 19:21:56 crc kubenswrapper[4701]: I1121 19:21:56.892314 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=4.105014735 podStartE2EDuration="7.892281341s" podCreationTimestamp="2025-11-21 19:21:49 +0000 UTC" firstStartedPulling="2025-11-21 19:21:51.923527899 +0000 UTC m=+1202.708667926" lastFinishedPulling="2025-11-21 19:21:55.710794505 +0000 UTC m=+1206.495934532" observedRunningTime="2025-11-21 19:21:56.890258526 +0000 UTC m=+1207.675398563" watchObservedRunningTime="2025-11-21 19:21:56.892281341 +0000 UTC m=+1207.677421398" Nov 21 19:21:56 crc kubenswrapper[4701]: I1121 19:21:56.893909 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.352987123 podStartE2EDuration="7.893895594s" podCreationTimestamp="2025-11-21 19:21:49 +0000 UTC" firstStartedPulling="2025-11-21 19:21:51.165691481 +0000 UTC m=+1201.950831498" lastFinishedPulling="2025-11-21 19:21:55.706599902 +0000 UTC m=+1206.491739969" observedRunningTime="2025-11-21 19:21:56.862248445 +0000 UTC m=+1207.647388472" watchObservedRunningTime="2025-11-21 19:21:56.893895594 +0000 UTC m=+1207.679035651" Nov 21 19:21:56 crc kubenswrapper[4701]: I1121 19:21:56.906377 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.665494051 podStartE2EDuration="7.906363038s" podCreationTimestamp="2025-11-21 19:21:49 +0000 UTC" firstStartedPulling="2025-11-21 19:21:51.447794155 +0000 UTC m=+1202.232934172" lastFinishedPulling="2025-11-21 19:21:55.688663132 +0000 UTC m=+1206.473803159" observedRunningTime="2025-11-21 19:21:56.905546206 +0000 UTC m=+1207.690686233" watchObservedRunningTime="2025-11-21 19:21:56.906363038 +0000 UTC m=+1207.691503095" Nov 21 19:21:56 crc kubenswrapper[4701]: I1121 19:21:56.947106 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=3.107672375 podStartE2EDuration="6.947065009s" podCreationTimestamp="2025-11-21 19:21:50 +0000 UTC" firstStartedPulling="2025-11-21 19:21:51.871308719 +0000 UTC m=+1202.656448746" lastFinishedPulling="2025-11-21 19:21:55.710701353 +0000 UTC m=+1206.495841380" observedRunningTime="2025-11-21 19:21:56.928784919 +0000 UTC m=+1207.713924946" watchObservedRunningTime="2025-11-21 19:21:56.947065009 +0000 UTC m=+1207.732205076" Nov 21 19:21:57 crc kubenswrapper[4701]: I1121 19:21:57.025728 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-66995857cf-dk6r8" podStartSLOduration=7.025706247 podStartE2EDuration="7.025706247s" podCreationTimestamp="2025-11-21 19:21:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:21:56.959850182 +0000 UTC m=+1207.744990209" watchObservedRunningTime="2025-11-21 19:21:57.025706247 +0000 UTC m=+1207.810846274" 
Nov 21 19:21:57 crc kubenswrapper[4701]: I1121 19:21:57.031173 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-r4vt6" podStartSLOduration=6.031161784 podStartE2EDuration="6.031161784s" podCreationTimestamp="2025-11-21 19:21:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:21:57.019617844 +0000 UTC m=+1207.804757871" watchObservedRunningTime="2025-11-21 19:21:57.031161784 +0000 UTC m=+1207.816301811" Nov 21 19:21:57 crc kubenswrapper[4701]: I1121 19:21:57.538675 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 21 19:21:57 crc kubenswrapper[4701]: I1121 19:21:57.640998 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-78k5q\" (UniqueName: \"kubernetes.io/projected/2b2b3795-4721-4695-a55f-4c7c7fd3ba58-kube-api-access-78k5q\") pod \"2b2b3795-4721-4695-a55f-4c7c7fd3ba58\" (UID: \"2b2b3795-4721-4695-a55f-4c7c7fd3ba58\") " Nov 21 19:21:57 crc kubenswrapper[4701]: I1121 19:21:57.641332 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b2b3795-4721-4695-a55f-4c7c7fd3ba58-config-data\") pod \"2b2b3795-4721-4695-a55f-4c7c7fd3ba58\" (UID: \"2b2b3795-4721-4695-a55f-4c7c7fd3ba58\") " Nov 21 19:21:57 crc kubenswrapper[4701]: I1121 19:21:57.641370 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2b2b3795-4721-4695-a55f-4c7c7fd3ba58-logs\") pod \"2b2b3795-4721-4695-a55f-4c7c7fd3ba58\" (UID: \"2b2b3795-4721-4695-a55f-4c7c7fd3ba58\") " Nov 21 19:21:57 crc kubenswrapper[4701]: I1121 19:21:57.641487 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b2b3795-4721-4695-a55f-4c7c7fd3ba58-combined-ca-bundle\") pod \"2b2b3795-4721-4695-a55f-4c7c7fd3ba58\" (UID: \"2b2b3795-4721-4695-a55f-4c7c7fd3ba58\") " Nov 21 19:21:57 crc kubenswrapper[4701]: I1121 19:21:57.642523 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2b2b3795-4721-4695-a55f-4c7c7fd3ba58-logs" (OuterVolumeSpecName: "logs") pod "2b2b3795-4721-4695-a55f-4c7c7fd3ba58" (UID: "2b2b3795-4721-4695-a55f-4c7c7fd3ba58"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:21:57 crc kubenswrapper[4701]: I1121 19:21:57.649443 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b2b3795-4721-4695-a55f-4c7c7fd3ba58-kube-api-access-78k5q" (OuterVolumeSpecName: "kube-api-access-78k5q") pod "2b2b3795-4721-4695-a55f-4c7c7fd3ba58" (UID: "2b2b3795-4721-4695-a55f-4c7c7fd3ba58"). InnerVolumeSpecName "kube-api-access-78k5q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:21:57 crc kubenswrapper[4701]: I1121 19:21:57.674361 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b2b3795-4721-4695-a55f-4c7c7fd3ba58-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2b2b3795-4721-4695-a55f-4c7c7fd3ba58" (UID: "2b2b3795-4721-4695-a55f-4c7c7fd3ba58"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:21:57 crc kubenswrapper[4701]: I1121 19:21:57.737354 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b2b3795-4721-4695-a55f-4c7c7fd3ba58-config-data" (OuterVolumeSpecName: "config-data") pod "2b2b3795-4721-4695-a55f-4c7c7fd3ba58" (UID: "2b2b3795-4721-4695-a55f-4c7c7fd3ba58"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:21:57 crc kubenswrapper[4701]: I1121 19:21:57.746187 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b2b3795-4721-4695-a55f-4c7c7fd3ba58-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:57 crc kubenswrapper[4701]: I1121 19:21:57.746264 4701 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2b2b3795-4721-4695-a55f-4c7c7fd3ba58-logs\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:57 crc kubenswrapper[4701]: I1121 19:21:57.746277 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b2b3795-4721-4695-a55f-4c7c7fd3ba58-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:57 crc kubenswrapper[4701]: I1121 19:21:57.746290 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-78k5q\" (UniqueName: \"kubernetes.io/projected/2b2b3795-4721-4695-a55f-4c7c7fd3ba58-kube-api-access-78k5q\") on node \"crc\" DevicePath \"\"" Nov 21 19:21:57 crc kubenswrapper[4701]: I1121 19:21:57.905816 4701 generic.go:334] "Generic (PLEG): container finished" podID="2b2b3795-4721-4695-a55f-4c7c7fd3ba58" containerID="0d237facb19f08d53b83e815826582dd2b9bcb859dd241933092699facaf3d73" exitCode=0 Nov 21 19:21:57 crc kubenswrapper[4701]: I1121 19:21:57.905860 4701 generic.go:334] "Generic (PLEG): container finished" podID="2b2b3795-4721-4695-a55f-4c7c7fd3ba58" containerID="ccf9f6759af4ae58c9dcf4e9f5efe82b8b48ee470f8bc191e1fa4fe30d7a1018" exitCode=143 Nov 21 19:21:57 crc kubenswrapper[4701]: I1121 19:21:57.908933 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 21 19:21:57 crc kubenswrapper[4701]: I1121 19:21:57.914803 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2b2b3795-4721-4695-a55f-4c7c7fd3ba58","Type":"ContainerDied","Data":"0d237facb19f08d53b83e815826582dd2b9bcb859dd241933092699facaf3d73"} Nov 21 19:21:57 crc kubenswrapper[4701]: I1121 19:21:57.914875 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2b2b3795-4721-4695-a55f-4c7c7fd3ba58","Type":"ContainerDied","Data":"ccf9f6759af4ae58c9dcf4e9f5efe82b8b48ee470f8bc191e1fa4fe30d7a1018"} Nov 21 19:21:57 crc kubenswrapper[4701]: I1121 19:21:57.914889 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2b2b3795-4721-4695-a55f-4c7c7fd3ba58","Type":"ContainerDied","Data":"a0daf832f3a8e5389b41b7ab013caaccd02aff6abc933899a1e57dee430e8695"} Nov 21 19:21:57 crc kubenswrapper[4701]: I1121 19:21:57.914923 4701 scope.go:117] "RemoveContainer" containerID="0d237facb19f08d53b83e815826582dd2b9bcb859dd241933092699facaf3d73" Nov 21 19:21:57 crc kubenswrapper[4701]: I1121 19:21:57.971413 4701 scope.go:117] "RemoveContainer" containerID="ccf9f6759af4ae58c9dcf4e9f5efe82b8b48ee470f8bc191e1fa4fe30d7a1018" Nov 21 19:21:57 crc kubenswrapper[4701]: I1121 19:21:57.985301 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 19:21:58 crc kubenswrapper[4701]: I1121 19:21:58.004763 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 19:21:58 crc kubenswrapper[4701]: I1121 19:21:58.034044 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 21 19:21:58 crc kubenswrapper[4701]: E1121 19:21:58.034602 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b2b3795-4721-4695-a55f-4c7c7fd3ba58" containerName="nova-metadata-log" Nov 21 19:21:58 crc kubenswrapper[4701]: I1121 19:21:58.034621 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b2b3795-4721-4695-a55f-4c7c7fd3ba58" containerName="nova-metadata-log" Nov 21 19:21:58 crc kubenswrapper[4701]: E1121 19:21:58.034659 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b2b3795-4721-4695-a55f-4c7c7fd3ba58" containerName="nova-metadata-metadata" Nov 21 19:21:58 crc kubenswrapper[4701]: I1121 19:21:58.034667 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b2b3795-4721-4695-a55f-4c7c7fd3ba58" containerName="nova-metadata-metadata" Nov 21 19:21:58 crc kubenswrapper[4701]: I1121 19:21:58.035013 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b2b3795-4721-4695-a55f-4c7c7fd3ba58" containerName="nova-metadata-metadata" Nov 21 19:21:58 crc kubenswrapper[4701]: I1121 19:21:58.035037 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b2b3795-4721-4695-a55f-4c7c7fd3ba58" containerName="nova-metadata-log" Nov 21 19:21:58 crc kubenswrapper[4701]: I1121 19:21:58.037825 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 21 19:21:58 crc kubenswrapper[4701]: I1121 19:21:58.043398 4701 scope.go:117] "RemoveContainer" containerID="0d237facb19f08d53b83e815826582dd2b9bcb859dd241933092699facaf3d73" Nov 21 19:21:58 crc kubenswrapper[4701]: I1121 19:21:58.046042 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 21 19:21:58 crc kubenswrapper[4701]: I1121 19:21:58.046334 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 21 19:21:58 crc kubenswrapper[4701]: E1121 19:21:58.053843 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d237facb19f08d53b83e815826582dd2b9bcb859dd241933092699facaf3d73\": container with ID starting with 0d237facb19f08d53b83e815826582dd2b9bcb859dd241933092699facaf3d73 not found: ID does not exist" containerID="0d237facb19f08d53b83e815826582dd2b9bcb859dd241933092699facaf3d73" Nov 21 19:21:58 crc kubenswrapper[4701]: I1121 19:21:58.053891 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d237facb19f08d53b83e815826582dd2b9bcb859dd241933092699facaf3d73"} err="failed to get container status \"0d237facb19f08d53b83e815826582dd2b9bcb859dd241933092699facaf3d73\": rpc error: code = NotFound desc = could not find container \"0d237facb19f08d53b83e815826582dd2b9bcb859dd241933092699facaf3d73\": container with ID starting with 0d237facb19f08d53b83e815826582dd2b9bcb859dd241933092699facaf3d73 not found: ID does not exist" Nov 21 19:21:58 crc kubenswrapper[4701]: I1121 19:21:58.053919 4701 scope.go:117] "RemoveContainer" containerID="ccf9f6759af4ae58c9dcf4e9f5efe82b8b48ee470f8bc191e1fa4fe30d7a1018" Nov 21 19:21:58 crc kubenswrapper[4701]: E1121 19:21:58.057480 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ccf9f6759af4ae58c9dcf4e9f5efe82b8b48ee470f8bc191e1fa4fe30d7a1018\": container with ID starting with ccf9f6759af4ae58c9dcf4e9f5efe82b8b48ee470f8bc191e1fa4fe30d7a1018 not found: ID does not exist" containerID="ccf9f6759af4ae58c9dcf4e9f5efe82b8b48ee470f8bc191e1fa4fe30d7a1018" Nov 21 19:21:58 crc kubenswrapper[4701]: I1121 19:21:58.057554 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ccf9f6759af4ae58c9dcf4e9f5efe82b8b48ee470f8bc191e1fa4fe30d7a1018"} err="failed to get container status \"ccf9f6759af4ae58c9dcf4e9f5efe82b8b48ee470f8bc191e1fa4fe30d7a1018\": rpc error: code = NotFound desc = could not find container \"ccf9f6759af4ae58c9dcf4e9f5efe82b8b48ee470f8bc191e1fa4fe30d7a1018\": container with ID starting with ccf9f6759af4ae58c9dcf4e9f5efe82b8b48ee470f8bc191e1fa4fe30d7a1018 not found: ID does not exist" Nov 21 19:21:58 crc kubenswrapper[4701]: I1121 19:21:58.057595 4701 scope.go:117] "RemoveContainer" containerID="0d237facb19f08d53b83e815826582dd2b9bcb859dd241933092699facaf3d73" Nov 21 19:21:58 crc kubenswrapper[4701]: I1121 19:21:58.059747 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d237facb19f08d53b83e815826582dd2b9bcb859dd241933092699facaf3d73"} err="failed to get container status \"0d237facb19f08d53b83e815826582dd2b9bcb859dd241933092699facaf3d73\": rpc error: code = NotFound desc = could not find container \"0d237facb19f08d53b83e815826582dd2b9bcb859dd241933092699facaf3d73\": container with ID starting with 
0d237facb19f08d53b83e815826582dd2b9bcb859dd241933092699facaf3d73 not found: ID does not exist" Nov 21 19:21:58 crc kubenswrapper[4701]: I1121 19:21:58.059780 4701 scope.go:117] "RemoveContainer" containerID="ccf9f6759af4ae58c9dcf4e9f5efe82b8b48ee470f8bc191e1fa4fe30d7a1018" Nov 21 19:21:58 crc kubenswrapper[4701]: I1121 19:21:58.062665 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9xf4h\" (UniqueName: \"kubernetes.io/projected/855a9455-82eb-49c8-8e99-79cd0e72bb96-kube-api-access-9xf4h\") pod \"nova-metadata-0\" (UID: \"855a9455-82eb-49c8-8e99-79cd0e72bb96\") " pod="openstack/nova-metadata-0" Nov 21 19:21:58 crc kubenswrapper[4701]: I1121 19:21:58.062890 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/855a9455-82eb-49c8-8e99-79cd0e72bb96-logs\") pod \"nova-metadata-0\" (UID: \"855a9455-82eb-49c8-8e99-79cd0e72bb96\") " pod="openstack/nova-metadata-0" Nov 21 19:21:58 crc kubenswrapper[4701]: I1121 19:21:58.063118 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/855a9455-82eb-49c8-8e99-79cd0e72bb96-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"855a9455-82eb-49c8-8e99-79cd0e72bb96\") " pod="openstack/nova-metadata-0" Nov 21 19:21:58 crc kubenswrapper[4701]: I1121 19:21:58.063192 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/855a9455-82eb-49c8-8e99-79cd0e72bb96-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"855a9455-82eb-49c8-8e99-79cd0e72bb96\") " pod="openstack/nova-metadata-0" Nov 21 19:21:58 crc kubenswrapper[4701]: I1121 19:21:58.063248 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/855a9455-82eb-49c8-8e99-79cd0e72bb96-config-data\") pod \"nova-metadata-0\" (UID: \"855a9455-82eb-49c8-8e99-79cd0e72bb96\") " pod="openstack/nova-metadata-0" Nov 21 19:21:58 crc kubenswrapper[4701]: I1121 19:21:58.063591 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ccf9f6759af4ae58c9dcf4e9f5efe82b8b48ee470f8bc191e1fa4fe30d7a1018"} err="failed to get container status \"ccf9f6759af4ae58c9dcf4e9f5efe82b8b48ee470f8bc191e1fa4fe30d7a1018\": rpc error: code = NotFound desc = could not find container \"ccf9f6759af4ae58c9dcf4e9f5efe82b8b48ee470f8bc191e1fa4fe30d7a1018\": container with ID starting with ccf9f6759af4ae58c9dcf4e9f5efe82b8b48ee470f8bc191e1fa4fe30d7a1018 not found: ID does not exist" Nov 21 19:21:58 crc kubenswrapper[4701]: I1121 19:21:58.070702 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 19:21:58 crc kubenswrapper[4701]: I1121 19:21:58.165142 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/855a9455-82eb-49c8-8e99-79cd0e72bb96-logs\") pod \"nova-metadata-0\" (UID: \"855a9455-82eb-49c8-8e99-79cd0e72bb96\") " pod="openstack/nova-metadata-0" Nov 21 19:21:58 crc kubenswrapper[4701]: I1121 19:21:58.165256 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/855a9455-82eb-49c8-8e99-79cd0e72bb96-nova-metadata-tls-certs\") pod 
\"nova-metadata-0\" (UID: \"855a9455-82eb-49c8-8e99-79cd0e72bb96\") " pod="openstack/nova-metadata-0" Nov 21 19:21:58 crc kubenswrapper[4701]: I1121 19:21:58.165288 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/855a9455-82eb-49c8-8e99-79cd0e72bb96-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"855a9455-82eb-49c8-8e99-79cd0e72bb96\") " pod="openstack/nova-metadata-0" Nov 21 19:21:58 crc kubenswrapper[4701]: I1121 19:21:58.165305 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/855a9455-82eb-49c8-8e99-79cd0e72bb96-config-data\") pod \"nova-metadata-0\" (UID: \"855a9455-82eb-49c8-8e99-79cd0e72bb96\") " pod="openstack/nova-metadata-0" Nov 21 19:21:58 crc kubenswrapper[4701]: I1121 19:21:58.165370 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9xf4h\" (UniqueName: \"kubernetes.io/projected/855a9455-82eb-49c8-8e99-79cd0e72bb96-kube-api-access-9xf4h\") pod \"nova-metadata-0\" (UID: \"855a9455-82eb-49c8-8e99-79cd0e72bb96\") " pod="openstack/nova-metadata-0" Nov 21 19:21:58 crc kubenswrapper[4701]: I1121 19:21:58.165711 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/855a9455-82eb-49c8-8e99-79cd0e72bb96-logs\") pod \"nova-metadata-0\" (UID: \"855a9455-82eb-49c8-8e99-79cd0e72bb96\") " pod="openstack/nova-metadata-0" Nov 21 19:21:58 crc kubenswrapper[4701]: I1121 19:21:58.173966 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/855a9455-82eb-49c8-8e99-79cd0e72bb96-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"855a9455-82eb-49c8-8e99-79cd0e72bb96\") " pod="openstack/nova-metadata-0" Nov 21 19:21:58 crc kubenswrapper[4701]: I1121 19:21:58.177157 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/855a9455-82eb-49c8-8e99-79cd0e72bb96-config-data\") pod \"nova-metadata-0\" (UID: \"855a9455-82eb-49c8-8e99-79cd0e72bb96\") " pod="openstack/nova-metadata-0" Nov 21 19:21:58 crc kubenswrapper[4701]: I1121 19:21:58.194934 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9xf4h\" (UniqueName: \"kubernetes.io/projected/855a9455-82eb-49c8-8e99-79cd0e72bb96-kube-api-access-9xf4h\") pod \"nova-metadata-0\" (UID: \"855a9455-82eb-49c8-8e99-79cd0e72bb96\") " pod="openstack/nova-metadata-0" Nov 21 19:21:58 crc kubenswrapper[4701]: I1121 19:21:58.195109 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/855a9455-82eb-49c8-8e99-79cd0e72bb96-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"855a9455-82eb-49c8-8e99-79cd0e72bb96\") " pod="openstack/nova-metadata-0" Nov 21 19:21:58 crc kubenswrapper[4701]: I1121 19:21:58.364758 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 21 19:21:58 crc kubenswrapper[4701]: I1121 19:21:58.830408 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 19:21:58 crc kubenswrapper[4701]: W1121 19:21:58.835604 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod855a9455_82eb_49c8_8e99_79cd0e72bb96.slice/crio-8e12e68bd0697b17cd8a368dc6966b9abab08ab4946bb5db0e2f2555f76a0c41 WatchSource:0}: Error finding container 8e12e68bd0697b17cd8a368dc6966b9abab08ab4946bb5db0e2f2555f76a0c41: Status 404 returned error can't find the container with id 8e12e68bd0697b17cd8a368dc6966b9abab08ab4946bb5db0e2f2555f76a0c41 Nov 21 19:21:58 crc kubenswrapper[4701]: I1121 19:21:58.926442 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"855a9455-82eb-49c8-8e99-79cd0e72bb96","Type":"ContainerStarted","Data":"8e12e68bd0697b17cd8a368dc6966b9abab08ab4946bb5db0e2f2555f76a0c41"} Nov 21 19:21:59 crc kubenswrapper[4701]: I1121 19:21:59.923623 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0" Nov 21 19:21:59 crc kubenswrapper[4701]: I1121 19:21:59.985258 4701 generic.go:334] "Generic (PLEG): container finished" podID="8a7a5be4-96a4-4574-9839-2d0576595305" containerID="4cdc4428ba39945a4fd78e922276c6bc963cbf50eda5e013f6a2e1db189e1833" exitCode=137 Nov 21 19:21:59 crc kubenswrapper[4701]: I1121 19:21:59.985532 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.008832 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2b2b3795-4721-4695-a55f-4c7c7fd3ba58" path="/var/lib/kubelet/pods/2b2b3795-4721-4695-a55f-4c7c7fd3ba58/volumes" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.013165 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"855a9455-82eb-49c8-8e99-79cd0e72bb96","Type":"ContainerStarted","Data":"ca951ae8c9f9d30da31912ae7b86160d36457829c253fa3501fa092691235ea5"} Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.013230 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"855a9455-82eb-49c8-8e99-79cd0e72bb96","Type":"ContainerStarted","Data":"ef1bd0dd32c6a19d79b73a2702022b745abbfd6a0c79032b5be106f7bab76f20"} Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.013243 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"8a7a5be4-96a4-4574-9839-2d0576595305","Type":"ContainerDied","Data":"4cdc4428ba39945a4fd78e922276c6bc963cbf50eda5e013f6a2e1db189e1833"} Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.013260 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"8a7a5be4-96a4-4574-9839-2d0576595305","Type":"ContainerDied","Data":"92eefb8b5ec60ad5206795726c8f8f3ab22a42fcf3b93e43561d3c5d55629a69"} Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.013296 4701 scope.go:117] "RemoveContainer" containerID="4cdc4428ba39945a4fd78e922276c6bc963cbf50eda5e013f6a2e1db189e1833" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.062942 4701 scope.go:117] "RemoveContainer" containerID="a0ec6d1a2fc828c1e5eac769e653fa0e9805be850a5d554ea13660925e01ccf6" Nov 21 19:22:00 crc kubenswrapper[4701]: 
I1121 19:22:00.075930 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.075907933 podStartE2EDuration="3.075907933s" podCreationTimestamp="2025-11-21 19:21:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:22:00.05488351 +0000 UTC m=+1210.840023547" watchObservedRunningTime="2025-11-21 19:22:00.075907933 +0000 UTC m=+1210.861047960" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.101769 4701 scope.go:117] "RemoveContainer" containerID="4cdc4428ba39945a4fd78e922276c6bc963cbf50eda5e013f6a2e1db189e1833" Nov 21 19:22:00 crc kubenswrapper[4701]: E1121 19:22:00.102512 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4cdc4428ba39945a4fd78e922276c6bc963cbf50eda5e013f6a2e1db189e1833\": container with ID starting with 4cdc4428ba39945a4fd78e922276c6bc963cbf50eda5e013f6a2e1db189e1833 not found: ID does not exist" containerID="4cdc4428ba39945a4fd78e922276c6bc963cbf50eda5e013f6a2e1db189e1833" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.102573 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4cdc4428ba39945a4fd78e922276c6bc963cbf50eda5e013f6a2e1db189e1833"} err="failed to get container status \"4cdc4428ba39945a4fd78e922276c6bc963cbf50eda5e013f6a2e1db189e1833\": rpc error: code = NotFound desc = could not find container \"4cdc4428ba39945a4fd78e922276c6bc963cbf50eda5e013f6a2e1db189e1833\": container with ID starting with 4cdc4428ba39945a4fd78e922276c6bc963cbf50eda5e013f6a2e1db189e1833 not found: ID does not exist" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.102647 4701 scope.go:117] "RemoveContainer" containerID="a0ec6d1a2fc828c1e5eac769e653fa0e9805be850a5d554ea13660925e01ccf6" Nov 21 19:22:00 crc kubenswrapper[4701]: E1121 19:22:00.102997 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a0ec6d1a2fc828c1e5eac769e653fa0e9805be850a5d554ea13660925e01ccf6\": container with ID starting with a0ec6d1a2fc828c1e5eac769e653fa0e9805be850a5d554ea13660925e01ccf6 not found: ID does not exist" containerID="a0ec6d1a2fc828c1e5eac769e653fa0e9805be850a5d554ea13660925e01ccf6" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.103045 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a0ec6d1a2fc828c1e5eac769e653fa0e9805be850a5d554ea13660925e01ccf6"} err="failed to get container status \"a0ec6d1a2fc828c1e5eac769e653fa0e9805be850a5d554ea13660925e01ccf6\": rpc error: code = NotFound desc = could not find container \"a0ec6d1a2fc828c1e5eac769e653fa0e9805be850a5d554ea13660925e01ccf6\": container with ID starting with a0ec6d1a2fc828c1e5eac769e653fa0e9805be850a5d554ea13660925e01ccf6 not found: ID does not exist" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.111028 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a7a5be4-96a4-4574-9839-2d0576595305-combined-ca-bundle\") pod \"8a7a5be4-96a4-4574-9839-2d0576595305\" (UID: \"8a7a5be4-96a4-4574-9839-2d0576595305\") " Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.111173 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: 
\"kubernetes.io/secret/8a7a5be4-96a4-4574-9839-2d0576595305-custom-prometheus-ca\") pod \"8a7a5be4-96a4-4574-9839-2d0576595305\" (UID: \"8a7a5be4-96a4-4574-9839-2d0576595305\") " Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.111330 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t845q\" (UniqueName: \"kubernetes.io/projected/8a7a5be4-96a4-4574-9839-2d0576595305-kube-api-access-t845q\") pod \"8a7a5be4-96a4-4574-9839-2d0576595305\" (UID: \"8a7a5be4-96a4-4574-9839-2d0576595305\") " Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.111367 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a7a5be4-96a4-4574-9839-2d0576595305-config-data\") pod \"8a7a5be4-96a4-4574-9839-2d0576595305\" (UID: \"8a7a5be4-96a4-4574-9839-2d0576595305\") " Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.111482 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8a7a5be4-96a4-4574-9839-2d0576595305-logs\") pod \"8a7a5be4-96a4-4574-9839-2d0576595305\" (UID: \"8a7a5be4-96a4-4574-9839-2d0576595305\") " Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.113638 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a7a5be4-96a4-4574-9839-2d0576595305-logs" (OuterVolumeSpecName: "logs") pod "8a7a5be4-96a4-4574-9839-2d0576595305" (UID: "8a7a5be4-96a4-4574-9839-2d0576595305"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.122826 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a7a5be4-96a4-4574-9839-2d0576595305-kube-api-access-t845q" (OuterVolumeSpecName: "kube-api-access-t845q") pod "8a7a5be4-96a4-4574-9839-2d0576595305" (UID: "8a7a5be4-96a4-4574-9839-2d0576595305"). InnerVolumeSpecName "kube-api-access-t845q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.151276 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a7a5be4-96a4-4574-9839-2d0576595305-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "8a7a5be4-96a4-4574-9839-2d0576595305" (UID: "8a7a5be4-96a4-4574-9839-2d0576595305"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.164668 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a7a5be4-96a4-4574-9839-2d0576595305-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8a7a5be4-96a4-4574-9839-2d0576595305" (UID: "8a7a5be4-96a4-4574-9839-2d0576595305"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.198712 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a7a5be4-96a4-4574-9839-2d0576595305-config-data" (OuterVolumeSpecName: "config-data") pod "8a7a5be4-96a4-4574-9839-2d0576595305" (UID: "8a7a5be4-96a4-4574-9839-2d0576595305"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.215776 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t845q\" (UniqueName: \"kubernetes.io/projected/8a7a5be4-96a4-4574-9839-2d0576595305-kube-api-access-t845q\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.216137 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a7a5be4-96a4-4574-9839-2d0576595305-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.216301 4701 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8a7a5be4-96a4-4574-9839-2d0576595305-logs\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.216421 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a7a5be4-96a4-4574-9839-2d0576595305-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.216633 4701 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/8a7a5be4-96a4-4574-9839-2d0576595305-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.330559 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.351099 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.365548 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.365627 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.368777 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 21 19:22:00 crc kubenswrapper[4701]: E1121 19:22:00.369400 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a7a5be4-96a4-4574-9839-2d0576595305" containerName="watcher-decision-engine" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.369430 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a7a5be4-96a4-4574-9839-2d0576595305" containerName="watcher-decision-engine" Nov 21 19:22:00 crc kubenswrapper[4701]: E1121 19:22:00.369449 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a7a5be4-96a4-4574-9839-2d0576595305" containerName="watcher-decision-engine" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.369460 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a7a5be4-96a4-4574-9839-2d0576595305" containerName="watcher-decision-engine" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.369744 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a7a5be4-96a4-4574-9839-2d0576595305" containerName="watcher-decision-engine" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.369772 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a7a5be4-96a4-4574-9839-2d0576595305" containerName="watcher-decision-engine" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.370825 4701 util.go:30] "No sandbox for 
pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.373032 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-decision-engine-config-data" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.379842 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.404079 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.432807 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.432882 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.434746 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87723852-e421-4a28-a9ce-90390eb3b7a8-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"87723852-e421-4a28-a9ce-90390eb3b7a8\") " pod="openstack/watcher-decision-engine-0" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.434801 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87723852-e421-4a28-a9ce-90390eb3b7a8-config-data\") pod \"watcher-decision-engine-0\" (UID: \"87723852-e421-4a28-a9ce-90390eb3b7a8\") " pod="openstack/watcher-decision-engine-0" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.434824 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/87723852-e421-4a28-a9ce-90390eb3b7a8-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"87723852-e421-4a28-a9ce-90390eb3b7a8\") " pod="openstack/watcher-decision-engine-0" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.434886 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87723852-e421-4a28-a9ce-90390eb3b7a8-logs\") pod \"watcher-decision-engine-0\" (UID: \"87723852-e421-4a28-a9ce-90390eb3b7a8\") " pod="openstack/watcher-decision-engine-0" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.434905 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-prhd5\" (UniqueName: \"kubernetes.io/projected/87723852-e421-4a28-a9ce-90390eb3b7a8-kube-api-access-prhd5\") pod \"watcher-decision-engine-0\" (UID: \"87723852-e421-4a28-a9ce-90390eb3b7a8\") " pod="openstack/watcher-decision-engine-0" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.537142 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87723852-e421-4a28-a9ce-90390eb3b7a8-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"87723852-e421-4a28-a9ce-90390eb3b7a8\") " pod="openstack/watcher-decision-engine-0" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.537227 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/87723852-e421-4a28-a9ce-90390eb3b7a8-config-data\") pod \"watcher-decision-engine-0\" (UID: \"87723852-e421-4a28-a9ce-90390eb3b7a8\") " pod="openstack/watcher-decision-engine-0" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.537260 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/87723852-e421-4a28-a9ce-90390eb3b7a8-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"87723852-e421-4a28-a9ce-90390eb3b7a8\") " pod="openstack/watcher-decision-engine-0" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.537316 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87723852-e421-4a28-a9ce-90390eb3b7a8-logs\") pod \"watcher-decision-engine-0\" (UID: \"87723852-e421-4a28-a9ce-90390eb3b7a8\") " pod="openstack/watcher-decision-engine-0" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.537344 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-prhd5\" (UniqueName: \"kubernetes.io/projected/87723852-e421-4a28-a9ce-90390eb3b7a8-kube-api-access-prhd5\") pod \"watcher-decision-engine-0\" (UID: \"87723852-e421-4a28-a9ce-90390eb3b7a8\") " pod="openstack/watcher-decision-engine-0" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.538427 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87723852-e421-4a28-a9ce-90390eb3b7a8-logs\") pod \"watcher-decision-engine-0\" (UID: \"87723852-e421-4a28-a9ce-90390eb3b7a8\") " pod="openstack/watcher-decision-engine-0" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.544538 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/87723852-e421-4a28-a9ce-90390eb3b7a8-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"87723852-e421-4a28-a9ce-90390eb3b7a8\") " pod="openstack/watcher-decision-engine-0" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.544780 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87723852-e421-4a28-a9ce-90390eb3b7a8-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"87723852-e421-4a28-a9ce-90390eb3b7a8\") " pod="openstack/watcher-decision-engine-0" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.545462 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87723852-e421-4a28-a9ce-90390eb3b7a8-config-data\") pod \"watcher-decision-engine-0\" (UID: \"87723852-e421-4a28-a9ce-90390eb3b7a8\") " pod="openstack/watcher-decision-engine-0" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.566721 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-prhd5\" (UniqueName: \"kubernetes.io/projected/87723852-e421-4a28-a9ce-90390eb3b7a8-kube-api-access-prhd5\") pod \"watcher-decision-engine-0\" (UID: \"87723852-e421-4a28-a9ce-90390eb3b7a8\") " pod="openstack/watcher-decision-engine-0" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.646998 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.705217 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-decision-engine-0" Nov 21 19:22:00 crc kubenswrapper[4701]: I1121 19:22:00.995197 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.001066 4701 generic.go:334] "Generic (PLEG): container finished" podID="936744f0-7e7e-46bc-8534-29b07d74fd07" containerID="d43d68c93e7472867cb5791d4812d2d6e96ea0c75aebc9193792cacae18297c4" exitCode=137 Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.001154 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"936744f0-7e7e-46bc-8534-29b07d74fd07","Type":"ContainerDied","Data":"d43d68c93e7472867cb5791d4812d2d6e96ea0c75aebc9193792cacae18297c4"} Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.001225 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"936744f0-7e7e-46bc-8534-29b07d74fd07","Type":"ContainerDied","Data":"5b8cde21d3ba57ae8673717035a39b8d46c30b77c088179820be35fd169a4c14"} Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.001250 4701 scope.go:117] "RemoveContainer" containerID="d43d68c93e7472867cb5791d4812d2d6e96ea0c75aebc9193792cacae18297c4" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.001424 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.064898 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.081585 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.092412 4701 scope.go:117] "RemoveContainer" containerID="80239202b92151898eded8c5347f6cfbbf610ac1e379f29901ab306b9f3aba3f" Nov 21 19:22:01 crc kubenswrapper[4701]: W1121 19:22:01.105156 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod87723852_e421_4a28_a9ce_90390eb3b7a8.slice/crio-98f212f50cca5c35d5fca9ce5b9e746a643e9487b858fb731535b39bab2cc5bc WatchSource:0}: Error finding container 98f212f50cca5c35d5fca9ce5b9e746a643e9487b858fb731535b39bab2cc5bc: Status 404 returned error can't find the container with id 98f212f50cca5c35d5fca9ce5b9e746a643e9487b858fb731535b39bab2cc5bc Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.158823 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/936744f0-7e7e-46bc-8534-29b07d74fd07-run-httpd\") pod \"936744f0-7e7e-46bc-8534-29b07d74fd07\" (UID: \"936744f0-7e7e-46bc-8534-29b07d74fd07\") " Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.158872 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/936744f0-7e7e-46bc-8534-29b07d74fd07-log-httpd\") pod \"936744f0-7e7e-46bc-8534-29b07d74fd07\" (UID: \"936744f0-7e7e-46bc-8534-29b07d74fd07\") " Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.158981 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v2xnk\" (UniqueName: \"kubernetes.io/projected/936744f0-7e7e-46bc-8534-29b07d74fd07-kube-api-access-v2xnk\") pod \"936744f0-7e7e-46bc-8534-29b07d74fd07\" (UID: \"936744f0-7e7e-46bc-8534-29b07d74fd07\") " Nov 
21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.159075 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/936744f0-7e7e-46bc-8534-29b07d74fd07-combined-ca-bundle\") pod \"936744f0-7e7e-46bc-8534-29b07d74fd07\" (UID: \"936744f0-7e7e-46bc-8534-29b07d74fd07\") " Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.159112 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/936744f0-7e7e-46bc-8534-29b07d74fd07-scripts\") pod \"936744f0-7e7e-46bc-8534-29b07d74fd07\" (UID: \"936744f0-7e7e-46bc-8534-29b07d74fd07\") " Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.159169 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/936744f0-7e7e-46bc-8534-29b07d74fd07-config-data\") pod \"936744f0-7e7e-46bc-8534-29b07d74fd07\" (UID: \"936744f0-7e7e-46bc-8534-29b07d74fd07\") " Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.159283 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/936744f0-7e7e-46bc-8534-29b07d74fd07-sg-core-conf-yaml\") pod \"936744f0-7e7e-46bc-8534-29b07d74fd07\" (UID: \"936744f0-7e7e-46bc-8534-29b07d74fd07\") " Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.162573 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/936744f0-7e7e-46bc-8534-29b07d74fd07-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "936744f0-7e7e-46bc-8534-29b07d74fd07" (UID: "936744f0-7e7e-46bc-8534-29b07d74fd07"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.165186 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/936744f0-7e7e-46bc-8534-29b07d74fd07-kube-api-access-v2xnk" (OuterVolumeSpecName: "kube-api-access-v2xnk") pod "936744f0-7e7e-46bc-8534-29b07d74fd07" (UID: "936744f0-7e7e-46bc-8534-29b07d74fd07"). InnerVolumeSpecName "kube-api-access-v2xnk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.166452 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/936744f0-7e7e-46bc-8534-29b07d74fd07-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "936744f0-7e7e-46bc-8534-29b07d74fd07" (UID: "936744f0-7e7e-46bc-8534-29b07d74fd07"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.168734 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/936744f0-7e7e-46bc-8534-29b07d74fd07-scripts" (OuterVolumeSpecName: "scripts") pod "936744f0-7e7e-46bc-8534-29b07d74fd07" (UID: "936744f0-7e7e-46bc-8534-29b07d74fd07"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.226376 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/936744f0-7e7e-46bc-8534-29b07d74fd07-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "936744f0-7e7e-46bc-8534-29b07d74fd07" (UID: "936744f0-7e7e-46bc-8534-29b07d74fd07"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.257511 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/936744f0-7e7e-46bc-8534-29b07d74fd07-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "936744f0-7e7e-46bc-8534-29b07d74fd07" (UID: "936744f0-7e7e-46bc-8534-29b07d74fd07"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.262066 4701 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/936744f0-7e7e-46bc-8534-29b07d74fd07-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.262179 4701 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/936744f0-7e7e-46bc-8534-29b07d74fd07-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.262271 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v2xnk\" (UniqueName: \"kubernetes.io/projected/936744f0-7e7e-46bc-8534-29b07d74fd07-kube-api-access-v2xnk\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.262344 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/936744f0-7e7e-46bc-8534-29b07d74fd07-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.262401 4701 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/936744f0-7e7e-46bc-8534-29b07d74fd07-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.262460 4701 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/936744f0-7e7e-46bc-8534-29b07d74fd07-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.276147 4701 scope.go:117] "RemoveContainer" containerID="703c53a0745a6913bcdfd9f1347df0be58d3c05200feea2a2723332cb30e6d66" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.311855 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/936744f0-7e7e-46bc-8534-29b07d74fd07-config-data" (OuterVolumeSpecName: "config-data") pod "936744f0-7e7e-46bc-8534-29b07d74fd07" (UID: "936744f0-7e7e-46bc-8534-29b07d74fd07"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.316095 4701 scope.go:117] "RemoveContainer" containerID="ded90fe47222044d06dc5f3ae3b8baac64d86526b6be22cf5dfee29c54fc4a2e" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.355961 4701 scope.go:117] "RemoveContainer" containerID="d43d68c93e7472867cb5791d4812d2d6e96ea0c75aebc9193792cacae18297c4" Nov 21 19:22:01 crc kubenswrapper[4701]: E1121 19:22:01.356385 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d43d68c93e7472867cb5791d4812d2d6e96ea0c75aebc9193792cacae18297c4\": container with ID starting with d43d68c93e7472867cb5791d4812d2d6e96ea0c75aebc9193792cacae18297c4 not found: ID does not exist" containerID="d43d68c93e7472867cb5791d4812d2d6e96ea0c75aebc9193792cacae18297c4" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.356449 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d43d68c93e7472867cb5791d4812d2d6e96ea0c75aebc9193792cacae18297c4"} err="failed to get container status \"d43d68c93e7472867cb5791d4812d2d6e96ea0c75aebc9193792cacae18297c4\": rpc error: code = NotFound desc = could not find container \"d43d68c93e7472867cb5791d4812d2d6e96ea0c75aebc9193792cacae18297c4\": container with ID starting with d43d68c93e7472867cb5791d4812d2d6e96ea0c75aebc9193792cacae18297c4 not found: ID does not exist" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.356488 4701 scope.go:117] "RemoveContainer" containerID="80239202b92151898eded8c5347f6cfbbf610ac1e379f29901ab306b9f3aba3f" Nov 21 19:22:01 crc kubenswrapper[4701]: E1121 19:22:01.356838 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"80239202b92151898eded8c5347f6cfbbf610ac1e379f29901ab306b9f3aba3f\": container with ID starting with 80239202b92151898eded8c5347f6cfbbf610ac1e379f29901ab306b9f3aba3f not found: ID does not exist" containerID="80239202b92151898eded8c5347f6cfbbf610ac1e379f29901ab306b9f3aba3f" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.356914 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"80239202b92151898eded8c5347f6cfbbf610ac1e379f29901ab306b9f3aba3f"} err="failed to get container status \"80239202b92151898eded8c5347f6cfbbf610ac1e379f29901ab306b9f3aba3f\": rpc error: code = NotFound desc = could not find container \"80239202b92151898eded8c5347f6cfbbf610ac1e379f29901ab306b9f3aba3f\": container with ID starting with 80239202b92151898eded8c5347f6cfbbf610ac1e379f29901ab306b9f3aba3f not found: ID does not exist" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.356977 4701 scope.go:117] "RemoveContainer" containerID="703c53a0745a6913bcdfd9f1347df0be58d3c05200feea2a2723332cb30e6d66" Nov 21 19:22:01 crc kubenswrapper[4701]: E1121 19:22:01.357679 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"703c53a0745a6913bcdfd9f1347df0be58d3c05200feea2a2723332cb30e6d66\": container with ID starting with 703c53a0745a6913bcdfd9f1347df0be58d3c05200feea2a2723332cb30e6d66 not found: ID does not exist" containerID="703c53a0745a6913bcdfd9f1347df0be58d3c05200feea2a2723332cb30e6d66" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.357706 4701 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"703c53a0745a6913bcdfd9f1347df0be58d3c05200feea2a2723332cb30e6d66"} err="failed to get container status \"703c53a0745a6913bcdfd9f1347df0be58d3c05200feea2a2723332cb30e6d66\": rpc error: code = NotFound desc = could not find container \"703c53a0745a6913bcdfd9f1347df0be58d3c05200feea2a2723332cb30e6d66\": container with ID starting with 703c53a0745a6913bcdfd9f1347df0be58d3c05200feea2a2723332cb30e6d66 not found: ID does not exist" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.357736 4701 scope.go:117] "RemoveContainer" containerID="ded90fe47222044d06dc5f3ae3b8baac64d86526b6be22cf5dfee29c54fc4a2e" Nov 21 19:22:01 crc kubenswrapper[4701]: E1121 19:22:01.358254 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ded90fe47222044d06dc5f3ae3b8baac64d86526b6be22cf5dfee29c54fc4a2e\": container with ID starting with ded90fe47222044d06dc5f3ae3b8baac64d86526b6be22cf5dfee29c54fc4a2e not found: ID does not exist" containerID="ded90fe47222044d06dc5f3ae3b8baac64d86526b6be22cf5dfee29c54fc4a2e" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.358283 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ded90fe47222044d06dc5f3ae3b8baac64d86526b6be22cf5dfee29c54fc4a2e"} err="failed to get container status \"ded90fe47222044d06dc5f3ae3b8baac64d86526b6be22cf5dfee29c54fc4a2e\": rpc error: code = NotFound desc = could not find container \"ded90fe47222044d06dc5f3ae3b8baac64d86526b6be22cf5dfee29c54fc4a2e\": container with ID starting with ded90fe47222044d06dc5f3ae3b8baac64d86526b6be22cf5dfee29c54fc4a2e not found: ID does not exist" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.365656 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/936744f0-7e7e-46bc-8534-29b07d74fd07-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.515399 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="b13d184e-6b2c-4f6b-9466-790ba15296cd" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.201:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.515650 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="b13d184e-6b2c-4f6b-9466-790ba15296cd" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.201:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.649888 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.661819 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.679923 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:22:01 crc kubenswrapper[4701]: E1121 19:22:01.680441 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="936744f0-7e7e-46bc-8534-29b07d74fd07" containerName="ceilometer-central-agent" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.680464 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="936744f0-7e7e-46bc-8534-29b07d74fd07" containerName="ceilometer-central-agent" Nov 21 19:22:01 
crc kubenswrapper[4701]: E1121 19:22:01.680481 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="936744f0-7e7e-46bc-8534-29b07d74fd07" containerName="ceilometer-notification-agent" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.680488 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="936744f0-7e7e-46bc-8534-29b07d74fd07" containerName="ceilometer-notification-agent" Nov 21 19:22:01 crc kubenswrapper[4701]: E1121 19:22:01.680504 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="936744f0-7e7e-46bc-8534-29b07d74fd07" containerName="proxy-httpd" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.680510 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="936744f0-7e7e-46bc-8534-29b07d74fd07" containerName="proxy-httpd" Nov 21 19:22:01 crc kubenswrapper[4701]: E1121 19:22:01.680537 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="936744f0-7e7e-46bc-8534-29b07d74fd07" containerName="sg-core" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.680544 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="936744f0-7e7e-46bc-8534-29b07d74fd07" containerName="sg-core" Nov 21 19:22:01 crc kubenswrapper[4701]: E1121 19:22:01.680565 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a7a5be4-96a4-4574-9839-2d0576595305" containerName="watcher-decision-engine" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.680572 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a7a5be4-96a4-4574-9839-2d0576595305" containerName="watcher-decision-engine" Nov 21 19:22:01 crc kubenswrapper[4701]: E1121 19:22:01.680583 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a7a5be4-96a4-4574-9839-2d0576595305" containerName="watcher-decision-engine" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.680589 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a7a5be4-96a4-4574-9839-2d0576595305" containerName="watcher-decision-engine" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.680792 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="936744f0-7e7e-46bc-8534-29b07d74fd07" containerName="proxy-httpd" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.680808 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a7a5be4-96a4-4574-9839-2d0576595305" containerName="watcher-decision-engine" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.680817 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="936744f0-7e7e-46bc-8534-29b07d74fd07" containerName="sg-core" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.680835 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="936744f0-7e7e-46bc-8534-29b07d74fd07" containerName="ceilometer-notification-agent" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.680851 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a7a5be4-96a4-4574-9839-2d0576595305" containerName="watcher-decision-engine" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.680860 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="936744f0-7e7e-46bc-8534-29b07d74fd07" containerName="ceilometer-central-agent" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.682898 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.685835 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.685859 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.705757 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.777958 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1861bc37-79d8-49a4-a995-34c76712a44d-log-httpd\") pod \"ceilometer-0\" (UID: \"1861bc37-79d8-49a4-a995-34c76712a44d\") " pod="openstack/ceilometer-0" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.778162 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1861bc37-79d8-49a4-a995-34c76712a44d-config-data\") pod \"ceilometer-0\" (UID: \"1861bc37-79d8-49a4-a995-34c76712a44d\") " pod="openstack/ceilometer-0" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.778276 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1861bc37-79d8-49a4-a995-34c76712a44d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1861bc37-79d8-49a4-a995-34c76712a44d\") " pod="openstack/ceilometer-0" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.778314 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cvrz4\" (UniqueName: \"kubernetes.io/projected/1861bc37-79d8-49a4-a995-34c76712a44d-kube-api-access-cvrz4\") pod \"ceilometer-0\" (UID: \"1861bc37-79d8-49a4-a995-34c76712a44d\") " pod="openstack/ceilometer-0" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.778355 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1861bc37-79d8-49a4-a995-34c76712a44d-scripts\") pod \"ceilometer-0\" (UID: \"1861bc37-79d8-49a4-a995-34c76712a44d\") " pod="openstack/ceilometer-0" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.778476 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1861bc37-79d8-49a4-a995-34c76712a44d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1861bc37-79d8-49a4-a995-34c76712a44d\") " pod="openstack/ceilometer-0" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.778668 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1861bc37-79d8-49a4-a995-34c76712a44d-run-httpd\") pod \"ceilometer-0\" (UID: \"1861bc37-79d8-49a4-a995-34c76712a44d\") " pod="openstack/ceilometer-0" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.881316 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1861bc37-79d8-49a4-a995-34c76712a44d-config-data\") pod \"ceilometer-0\" (UID: \"1861bc37-79d8-49a4-a995-34c76712a44d\") " pod="openstack/ceilometer-0" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.887601 
4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1861bc37-79d8-49a4-a995-34c76712a44d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1861bc37-79d8-49a4-a995-34c76712a44d\") " pod="openstack/ceilometer-0" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.887688 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cvrz4\" (UniqueName: \"kubernetes.io/projected/1861bc37-79d8-49a4-a995-34c76712a44d-kube-api-access-cvrz4\") pod \"ceilometer-0\" (UID: \"1861bc37-79d8-49a4-a995-34c76712a44d\") " pod="openstack/ceilometer-0" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.887759 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1861bc37-79d8-49a4-a995-34c76712a44d-scripts\") pod \"ceilometer-0\" (UID: \"1861bc37-79d8-49a4-a995-34c76712a44d\") " pod="openstack/ceilometer-0" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.887872 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1861bc37-79d8-49a4-a995-34c76712a44d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1861bc37-79d8-49a4-a995-34c76712a44d\") " pod="openstack/ceilometer-0" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.888096 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1861bc37-79d8-49a4-a995-34c76712a44d-run-httpd\") pod \"ceilometer-0\" (UID: \"1861bc37-79d8-49a4-a995-34c76712a44d\") " pod="openstack/ceilometer-0" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.888189 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1861bc37-79d8-49a4-a995-34c76712a44d-log-httpd\") pod \"ceilometer-0\" (UID: \"1861bc37-79d8-49a4-a995-34c76712a44d\") " pod="openstack/ceilometer-0" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.888604 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1861bc37-79d8-49a4-a995-34c76712a44d-run-httpd\") pod \"ceilometer-0\" (UID: \"1861bc37-79d8-49a4-a995-34c76712a44d\") " pod="openstack/ceilometer-0" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.888733 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1861bc37-79d8-49a4-a995-34c76712a44d-log-httpd\") pod \"ceilometer-0\" (UID: \"1861bc37-79d8-49a4-a995-34c76712a44d\") " pod="openstack/ceilometer-0" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.891736 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1861bc37-79d8-49a4-a995-34c76712a44d-config-data\") pod \"ceilometer-0\" (UID: \"1861bc37-79d8-49a4-a995-34c76712a44d\") " pod="openstack/ceilometer-0" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.892605 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1861bc37-79d8-49a4-a995-34c76712a44d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1861bc37-79d8-49a4-a995-34c76712a44d\") " pod="openstack/ceilometer-0" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.894778 4701 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1861bc37-79d8-49a4-a995-34c76712a44d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1861bc37-79d8-49a4-a995-34c76712a44d\") " pod="openstack/ceilometer-0" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.902255 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1861bc37-79d8-49a4-a995-34c76712a44d-scripts\") pod \"ceilometer-0\" (UID: \"1861bc37-79d8-49a4-a995-34c76712a44d\") " pod="openstack/ceilometer-0" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.912063 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cvrz4\" (UniqueName: \"kubernetes.io/projected/1861bc37-79d8-49a4-a995-34c76712a44d-kube-api-access-cvrz4\") pod \"ceilometer-0\" (UID: \"1861bc37-79d8-49a4-a995-34c76712a44d\") " pod="openstack/ceilometer-0" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.978018 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a7a5be4-96a4-4574-9839-2d0576595305" path="/var/lib/kubelet/pods/8a7a5be4-96a4-4574-9839-2d0576595305/volumes" Nov 21 19:22:01 crc kubenswrapper[4701]: I1121 19:22:01.979324 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="936744f0-7e7e-46bc-8534-29b07d74fd07" path="/var/lib/kubelet/pods/936744f0-7e7e-46bc-8534-29b07d74fd07/volumes" Nov 21 19:22:02 crc kubenswrapper[4701]: I1121 19:22:02.002037 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 19:22:02 crc kubenswrapper[4701]: I1121 19:22:02.075310 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-h694n" event={"ID":"1554914a-f5b4-46aa-90a4-a9c07bdd6e53","Type":"ContainerDied","Data":"e6418b9dae36b0f9223e04f84281e520b64a115451b632c0efd4cbc6f2b8aaef"} Nov 21 19:22:02 crc kubenswrapper[4701]: I1121 19:22:02.075196 4701 generic.go:334] "Generic (PLEG): container finished" podID="1554914a-f5b4-46aa-90a4-a9c07bdd6e53" containerID="e6418b9dae36b0f9223e04f84281e520b64a115451b632c0efd4cbc6f2b8aaef" exitCode=0 Nov 21 19:22:02 crc kubenswrapper[4701]: I1121 19:22:02.080118 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"87723852-e421-4a28-a9ce-90390eb3b7a8","Type":"ContainerStarted","Data":"2c9e3d0c360454822ce06d6d6c20db2adff86224e2c575bc040d25c50dc58223"} Nov 21 19:22:02 crc kubenswrapper[4701]: I1121 19:22:02.080163 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"87723852-e421-4a28-a9ce-90390eb3b7a8","Type":"ContainerStarted","Data":"98f212f50cca5c35d5fca9ce5b9e746a643e9487b858fb731535b39bab2cc5bc"} Nov 21 19:22:02 crc kubenswrapper[4701]: I1121 19:22:02.123049 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-decision-engine-0" podStartSLOduration=2.123028936 podStartE2EDuration="2.123028936s" podCreationTimestamp="2025-11-21 19:22:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:22:02.119613465 +0000 UTC m=+1212.904753482" watchObservedRunningTime="2025-11-21 19:22:02.123028936 +0000 UTC m=+1212.908168963" Nov 21 19:22:02 crc kubenswrapper[4701]: I1121 19:22:02.588062 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:22:02 crc kubenswrapper[4701]: W1121 
19:22:02.597139 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1861bc37_79d8_49a4_a995_34c76712a44d.slice/crio-9a2efe9c2bbbdc4cb2a0329ec95309ac3da672bebd5bcf6f44314658e8e0bd32 WatchSource:0}: Error finding container 9a2efe9c2bbbdc4cb2a0329ec95309ac3da672bebd5bcf6f44314658e8e0bd32: Status 404 returned error can't find the container with id 9a2efe9c2bbbdc4cb2a0329ec95309ac3da672bebd5bcf6f44314658e8e0bd32 Nov 21 19:22:03 crc kubenswrapper[4701]: I1121 19:22:03.115492 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1861bc37-79d8-49a4-a995-34c76712a44d","Type":"ContainerStarted","Data":"d1a7286887da00ec2f7b593a56162391f36f2f38b35d647ed5ebeaf62086396e"} Nov 21 19:22:03 crc kubenswrapper[4701]: I1121 19:22:03.116268 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1861bc37-79d8-49a4-a995-34c76712a44d","Type":"ContainerStarted","Data":"9a2efe9c2bbbdc4cb2a0329ec95309ac3da672bebd5bcf6f44314658e8e0bd32"} Nov 21 19:22:03 crc kubenswrapper[4701]: I1121 19:22:03.371303 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 21 19:22:03 crc kubenswrapper[4701]: I1121 19:22:03.373398 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 21 19:22:03 crc kubenswrapper[4701]: I1121 19:22:03.470459 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-h694n" Nov 21 19:22:03 crc kubenswrapper[4701]: I1121 19:22:03.537171 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1554914a-f5b4-46aa-90a4-a9c07bdd6e53-config-data\") pod \"1554914a-f5b4-46aa-90a4-a9c07bdd6e53\" (UID: \"1554914a-f5b4-46aa-90a4-a9c07bdd6e53\") " Nov 21 19:22:03 crc kubenswrapper[4701]: I1121 19:22:03.537261 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1554914a-f5b4-46aa-90a4-a9c07bdd6e53-combined-ca-bundle\") pod \"1554914a-f5b4-46aa-90a4-a9c07bdd6e53\" (UID: \"1554914a-f5b4-46aa-90a4-a9c07bdd6e53\") " Nov 21 19:22:03 crc kubenswrapper[4701]: I1121 19:22:03.537324 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1554914a-f5b4-46aa-90a4-a9c07bdd6e53-scripts\") pod \"1554914a-f5b4-46aa-90a4-a9c07bdd6e53\" (UID: \"1554914a-f5b4-46aa-90a4-a9c07bdd6e53\") " Nov 21 19:22:03 crc kubenswrapper[4701]: I1121 19:22:03.537425 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vz7fw\" (UniqueName: \"kubernetes.io/projected/1554914a-f5b4-46aa-90a4-a9c07bdd6e53-kube-api-access-vz7fw\") pod \"1554914a-f5b4-46aa-90a4-a9c07bdd6e53\" (UID: \"1554914a-f5b4-46aa-90a4-a9c07bdd6e53\") " Nov 21 19:22:03 crc kubenswrapper[4701]: I1121 19:22:03.545270 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1554914a-f5b4-46aa-90a4-a9c07bdd6e53-kube-api-access-vz7fw" (OuterVolumeSpecName: "kube-api-access-vz7fw") pod "1554914a-f5b4-46aa-90a4-a9c07bdd6e53" (UID: "1554914a-f5b4-46aa-90a4-a9c07bdd6e53"). InnerVolumeSpecName "kube-api-access-vz7fw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:22:03 crc kubenswrapper[4701]: I1121 19:22:03.549361 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1554914a-f5b4-46aa-90a4-a9c07bdd6e53-scripts" (OuterVolumeSpecName: "scripts") pod "1554914a-f5b4-46aa-90a4-a9c07bdd6e53" (UID: "1554914a-f5b4-46aa-90a4-a9c07bdd6e53"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:03 crc kubenswrapper[4701]: I1121 19:22:03.579995 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1554914a-f5b4-46aa-90a4-a9c07bdd6e53-config-data" (OuterVolumeSpecName: "config-data") pod "1554914a-f5b4-46aa-90a4-a9c07bdd6e53" (UID: "1554914a-f5b4-46aa-90a4-a9c07bdd6e53"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:03 crc kubenswrapper[4701]: I1121 19:22:03.580948 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1554914a-f5b4-46aa-90a4-a9c07bdd6e53-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1554914a-f5b4-46aa-90a4-a9c07bdd6e53" (UID: "1554914a-f5b4-46aa-90a4-a9c07bdd6e53"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:03 crc kubenswrapper[4701]: I1121 19:22:03.639462 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1554914a-f5b4-46aa-90a4-a9c07bdd6e53-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:03 crc kubenswrapper[4701]: I1121 19:22:03.639502 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1554914a-f5b4-46aa-90a4-a9c07bdd6e53-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:03 crc kubenswrapper[4701]: I1121 19:22:03.639517 4701 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1554914a-f5b4-46aa-90a4-a9c07bdd6e53-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:03 crc kubenswrapper[4701]: I1121 19:22:03.639526 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vz7fw\" (UniqueName: \"kubernetes.io/projected/1554914a-f5b4-46aa-90a4-a9c07bdd6e53-kube-api-access-vz7fw\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:04 crc kubenswrapper[4701]: I1121 19:22:04.132260 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1861bc37-79d8-49a4-a995-34c76712a44d","Type":"ContainerStarted","Data":"bd6b5a30789d106b288fc08410b4aabaf989f180119898b847c259f236256633"} Nov 21 19:22:04 crc kubenswrapper[4701]: I1121 19:22:04.132613 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1861bc37-79d8-49a4-a995-34c76712a44d","Type":"ContainerStarted","Data":"0bb9e1a5c37e9a2877ace5665e856b8f8daff6ddb6ecaf5c979bffa88f102669"} Nov 21 19:22:04 crc kubenswrapper[4701]: I1121 19:22:04.137014 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-h694n" Nov 21 19:22:04 crc kubenswrapper[4701]: I1121 19:22:04.137516 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-h694n" event={"ID":"1554914a-f5b4-46aa-90a4-a9c07bdd6e53","Type":"ContainerDied","Data":"8d98016474dd6b260b91188ec296318fb50fd8d130bc53fcc67314829a3e92ba"} Nov 21 19:22:04 crc kubenswrapper[4701]: I1121 19:22:04.137543 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8d98016474dd6b260b91188ec296318fb50fd8d130bc53fcc67314829a3e92ba" Nov 21 19:22:04 crc kubenswrapper[4701]: I1121 19:22:04.292432 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 21 19:22:04 crc kubenswrapper[4701]: I1121 19:22:04.292758 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="b13d184e-6b2c-4f6b-9466-790ba15296cd" containerName="nova-api-log" containerID="cri-o://d1d7b61877243190a828bc5214a38c3857e62eb6f56e259f8ac738cc1cecc640" gracePeriod=30 Nov 21 19:22:04 crc kubenswrapper[4701]: I1121 19:22:04.292881 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="b13d184e-6b2c-4f6b-9466-790ba15296cd" containerName="nova-api-api" containerID="cri-o://8d55fcdda35709a32f611036657f5e9d553b74a0fc3a035fe38d06e88ba7c898" gracePeriod=30 Nov 21 19:22:04 crc kubenswrapper[4701]: I1121 19:22:04.309969 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 19:22:04 crc kubenswrapper[4701]: I1121 19:22:04.310252 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="53f3873a-a20c-4245-a0a3-a34bd8c5b6fe" containerName="nova-scheduler-scheduler" containerID="cri-o://22a14fc572829240174fd2695bfe2ce8e04d886a966dda8f14aab533687bb778" gracePeriod=30 Nov 21 19:22:04 crc kubenswrapper[4701]: I1121 19:22:04.337923 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 19:22:04 crc kubenswrapper[4701]: E1121 19:22:04.580577 4701 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb13d184e_6b2c_4f6b_9466_790ba15296cd.slice/crio-conmon-d1d7b61877243190a828bc5214a38c3857e62eb6f56e259f8ac738cc1cecc640.scope\": RecentStats: unable to find data in memory cache]" Nov 21 19:22:05 crc kubenswrapper[4701]: I1121 19:22:05.148107 4701 generic.go:334] "Generic (PLEG): container finished" podID="b13d184e-6b2c-4f6b-9466-790ba15296cd" containerID="d1d7b61877243190a828bc5214a38c3857e62eb6f56e259f8ac738cc1cecc640" exitCode=143 Nov 21 19:22:05 crc kubenswrapper[4701]: I1121 19:22:05.148361 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b13d184e-6b2c-4f6b-9466-790ba15296cd","Type":"ContainerDied","Data":"d1d7b61877243190a828bc5214a38c3857e62eb6f56e259f8ac738cc1cecc640"} Nov 21 19:22:05 crc kubenswrapper[4701]: I1121 19:22:05.149549 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="855a9455-82eb-49c8-8e99-79cd0e72bb96" containerName="nova-metadata-log" containerID="cri-o://ef1bd0dd32c6a19d79b73a2702022b745abbfd6a0c79032b5be106f7bab76f20" gracePeriod=30 Nov 21 19:22:05 crc kubenswrapper[4701]: I1121 19:22:05.149719 4701 kuberuntime_container.go:808] "Killing container with a grace 
period" pod="openstack/nova-metadata-0" podUID="855a9455-82eb-49c8-8e99-79cd0e72bb96" containerName="nova-metadata-metadata" containerID="cri-o://ca951ae8c9f9d30da31912ae7b86160d36457829c253fa3501fa092691235ea5" gracePeriod=30 Nov 21 19:22:05 crc kubenswrapper[4701]: E1121 19:22:05.374016 4701 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="22a14fc572829240174fd2695bfe2ce8e04d886a966dda8f14aab533687bb778" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 21 19:22:05 crc kubenswrapper[4701]: E1121 19:22:05.379746 4701 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="22a14fc572829240174fd2695bfe2ce8e04d886a966dda8f14aab533687bb778" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 21 19:22:05 crc kubenswrapper[4701]: E1121 19:22:05.384694 4701 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="22a14fc572829240174fd2695bfe2ce8e04d886a966dda8f14aab533687bb778" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 21 19:22:05 crc kubenswrapper[4701]: E1121 19:22:05.384866 4701 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="53f3873a-a20c-4245-a0a3-a34bd8c5b6fe" containerName="nova-scheduler-scheduler" Nov 21 19:22:05 crc kubenswrapper[4701]: I1121 19:22:05.666410 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-66995857cf-dk6r8" Nov 21 19:22:05 crc kubenswrapper[4701]: I1121 19:22:05.742549 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-679559bbc5-wxbzl"] Nov 21 19:22:05 crc kubenswrapper[4701]: I1121 19:22:05.742851 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-679559bbc5-wxbzl" podUID="c4234702-4265-4b3a-ab18-9ba8d244ea33" containerName="dnsmasq-dns" containerID="cri-o://c9e99e980d9315c400d62de2d04b8c8d3eb4e81d8e2179a33e5b4c9591156de5" gracePeriod=10 Nov 21 19:22:05 crc kubenswrapper[4701]: I1121 19:22:05.925699 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.015897 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/855a9455-82eb-49c8-8e99-79cd0e72bb96-logs\") pod \"855a9455-82eb-49c8-8e99-79cd0e72bb96\" (UID: \"855a9455-82eb-49c8-8e99-79cd0e72bb96\") " Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.016307 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/855a9455-82eb-49c8-8e99-79cd0e72bb96-nova-metadata-tls-certs\") pod \"855a9455-82eb-49c8-8e99-79cd0e72bb96\" (UID: \"855a9455-82eb-49c8-8e99-79cd0e72bb96\") " Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.016362 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xf4h\" (UniqueName: \"kubernetes.io/projected/855a9455-82eb-49c8-8e99-79cd0e72bb96-kube-api-access-9xf4h\") pod \"855a9455-82eb-49c8-8e99-79cd0e72bb96\" (UID: \"855a9455-82eb-49c8-8e99-79cd0e72bb96\") " Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.016470 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/855a9455-82eb-49c8-8e99-79cd0e72bb96-config-data\") pod \"855a9455-82eb-49c8-8e99-79cd0e72bb96\" (UID: \"855a9455-82eb-49c8-8e99-79cd0e72bb96\") " Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.016570 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/855a9455-82eb-49c8-8e99-79cd0e72bb96-combined-ca-bundle\") pod \"855a9455-82eb-49c8-8e99-79cd0e72bb96\" (UID: \"855a9455-82eb-49c8-8e99-79cd0e72bb96\") " Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.018599 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/855a9455-82eb-49c8-8e99-79cd0e72bb96-logs" (OuterVolumeSpecName: "logs") pod "855a9455-82eb-49c8-8e99-79cd0e72bb96" (UID: "855a9455-82eb-49c8-8e99-79cd0e72bb96"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.028429 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/855a9455-82eb-49c8-8e99-79cd0e72bb96-kube-api-access-9xf4h" (OuterVolumeSpecName: "kube-api-access-9xf4h") pod "855a9455-82eb-49c8-8e99-79cd0e72bb96" (UID: "855a9455-82eb-49c8-8e99-79cd0e72bb96"). InnerVolumeSpecName "kube-api-access-9xf4h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.061461 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/855a9455-82eb-49c8-8e99-79cd0e72bb96-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "855a9455-82eb-49c8-8e99-79cd0e72bb96" (UID: "855a9455-82eb-49c8-8e99-79cd0e72bb96"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.102944 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/855a9455-82eb-49c8-8e99-79cd0e72bb96-config-data" (OuterVolumeSpecName: "config-data") pod "855a9455-82eb-49c8-8e99-79cd0e72bb96" (UID: "855a9455-82eb-49c8-8e99-79cd0e72bb96"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.119522 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/855a9455-82eb-49c8-8e99-79cd0e72bb96-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.119560 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/855a9455-82eb-49c8-8e99-79cd0e72bb96-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.119576 4701 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/855a9455-82eb-49c8-8e99-79cd0e72bb96-logs\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.119585 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xf4h\" (UniqueName: \"kubernetes.io/projected/855a9455-82eb-49c8-8e99-79cd0e72bb96-kube-api-access-9xf4h\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.130362 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/855a9455-82eb-49c8-8e99-79cd0e72bb96-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "855a9455-82eb-49c8-8e99-79cd0e72bb96" (UID: "855a9455-82eb-49c8-8e99-79cd0e72bb96"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.165555 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1861bc37-79d8-49a4-a995-34c76712a44d","Type":"ContainerStarted","Data":"415b0ee73cbb8f1f276f03b7fdedca99b38b65db657f82dd60a3d1d5cf72f245"} Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.168456 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.174447 4701 generic.go:334] "Generic (PLEG): container finished" podID="c4234702-4265-4b3a-ab18-9ba8d244ea33" containerID="c9e99e980d9315c400d62de2d04b8c8d3eb4e81d8e2179a33e5b4c9591156de5" exitCode=0 Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.174723 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-679559bbc5-wxbzl" event={"ID":"c4234702-4265-4b3a-ab18-9ba8d244ea33","Type":"ContainerDied","Data":"c9e99e980d9315c400d62de2d04b8c8d3eb4e81d8e2179a33e5b4c9591156de5"} Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.179777 4701 generic.go:334] "Generic (PLEG): container finished" podID="855a9455-82eb-49c8-8e99-79cd0e72bb96" containerID="ca951ae8c9f9d30da31912ae7b86160d36457829c253fa3501fa092691235ea5" exitCode=0 Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.179808 4701 generic.go:334] "Generic (PLEG): container finished" podID="855a9455-82eb-49c8-8e99-79cd0e72bb96" containerID="ef1bd0dd32c6a19d79b73a2702022b745abbfd6a0c79032b5be106f7bab76f20" exitCode=143 Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.179834 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"855a9455-82eb-49c8-8e99-79cd0e72bb96","Type":"ContainerDied","Data":"ca951ae8c9f9d30da31912ae7b86160d36457829c253fa3501fa092691235ea5"} Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.179858 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-metadata-0" event={"ID":"855a9455-82eb-49c8-8e99-79cd0e72bb96","Type":"ContainerDied","Data":"ef1bd0dd32c6a19d79b73a2702022b745abbfd6a0c79032b5be106f7bab76f20"} Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.179870 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"855a9455-82eb-49c8-8e99-79cd0e72bb96","Type":"ContainerDied","Data":"8e12e68bd0697b17cd8a368dc6966b9abab08ab4946bb5db0e2f2555f76a0c41"} Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.179889 4701 scope.go:117] "RemoveContainer" containerID="ca951ae8c9f9d30da31912ae7b86160d36457829c253fa3501fa092691235ea5" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.179939 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.222260 4701 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/855a9455-82eb-49c8-8e99-79cd0e72bb96-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.234227 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.5735652140000003 podStartE2EDuration="5.234185755s" podCreationTimestamp="2025-11-21 19:22:01 +0000 UTC" firstStartedPulling="2025-11-21 19:22:02.600850936 +0000 UTC m=+1213.385991003" lastFinishedPulling="2025-11-21 19:22:05.261471517 +0000 UTC m=+1216.046611544" observedRunningTime="2025-11-21 19:22:06.194602664 +0000 UTC m=+1216.979742691" watchObservedRunningTime="2025-11-21 19:22:06.234185755 +0000 UTC m=+1217.019325782" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.244638 4701 scope.go:117] "RemoveContainer" containerID="ef1bd0dd32c6a19d79b73a2702022b745abbfd6a0c79032b5be106f7bab76f20" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.249167 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.296836 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.306490 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 21 19:22:06 crc kubenswrapper[4701]: E1121 19:22:06.307104 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="855a9455-82eb-49c8-8e99-79cd0e72bb96" containerName="nova-metadata-log" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.307117 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="855a9455-82eb-49c8-8e99-79cd0e72bb96" containerName="nova-metadata-log" Nov 21 19:22:06 crc kubenswrapper[4701]: E1121 19:22:06.307136 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="855a9455-82eb-49c8-8e99-79cd0e72bb96" containerName="nova-metadata-metadata" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.307145 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="855a9455-82eb-49c8-8e99-79cd0e72bb96" containerName="nova-metadata-metadata" Nov 21 19:22:06 crc kubenswrapper[4701]: E1121 19:22:06.307176 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1554914a-f5b4-46aa-90a4-a9c07bdd6e53" containerName="nova-manage" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.307182 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="1554914a-f5b4-46aa-90a4-a9c07bdd6e53" 
containerName="nova-manage" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.307466 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="855a9455-82eb-49c8-8e99-79cd0e72bb96" containerName="nova-metadata-metadata" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.307488 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="855a9455-82eb-49c8-8e99-79cd0e72bb96" containerName="nova-metadata-log" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.307517 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="1554914a-f5b4-46aa-90a4-a9c07bdd6e53" containerName="nova-manage" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.309589 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.315490 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.320498 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.320742 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.324474 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66b0eb91-a81c-4a68-8e9d-a1042d709e80-config-data\") pod \"nova-metadata-0\" (UID: \"66b0eb91-a81c-4a68-8e9d-a1042d709e80\") " pod="openstack/nova-metadata-0" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.324521 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/66b0eb91-a81c-4a68-8e9d-a1042d709e80-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"66b0eb91-a81c-4a68-8e9d-a1042d709e80\") " pod="openstack/nova-metadata-0" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.324567 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p7q5m\" (UniqueName: \"kubernetes.io/projected/66b0eb91-a81c-4a68-8e9d-a1042d709e80-kube-api-access-p7q5m\") pod \"nova-metadata-0\" (UID: \"66b0eb91-a81c-4a68-8e9d-a1042d709e80\") " pod="openstack/nova-metadata-0" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.324595 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/66b0eb91-a81c-4a68-8e9d-a1042d709e80-logs\") pod \"nova-metadata-0\" (UID: \"66b0eb91-a81c-4a68-8e9d-a1042d709e80\") " pod="openstack/nova-metadata-0" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.324647 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66b0eb91-a81c-4a68-8e9d-a1042d709e80-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"66b0eb91-a81c-4a68-8e9d-a1042d709e80\") " pod="openstack/nova-metadata-0" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.386554 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-679559bbc5-wxbzl" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.393361 4701 scope.go:117] "RemoveContainer" containerID="ca951ae8c9f9d30da31912ae7b86160d36457829c253fa3501fa092691235ea5" Nov 21 19:22:06 crc kubenswrapper[4701]: E1121 19:22:06.397188 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ca951ae8c9f9d30da31912ae7b86160d36457829c253fa3501fa092691235ea5\": container with ID starting with ca951ae8c9f9d30da31912ae7b86160d36457829c253fa3501fa092691235ea5 not found: ID does not exist" containerID="ca951ae8c9f9d30da31912ae7b86160d36457829c253fa3501fa092691235ea5" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.397259 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca951ae8c9f9d30da31912ae7b86160d36457829c253fa3501fa092691235ea5"} err="failed to get container status \"ca951ae8c9f9d30da31912ae7b86160d36457829c253fa3501fa092691235ea5\": rpc error: code = NotFound desc = could not find container \"ca951ae8c9f9d30da31912ae7b86160d36457829c253fa3501fa092691235ea5\": container with ID starting with ca951ae8c9f9d30da31912ae7b86160d36457829c253fa3501fa092691235ea5 not found: ID does not exist" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.397292 4701 scope.go:117] "RemoveContainer" containerID="ef1bd0dd32c6a19d79b73a2702022b745abbfd6a0c79032b5be106f7bab76f20" Nov 21 19:22:06 crc kubenswrapper[4701]: E1121 19:22:06.399771 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ef1bd0dd32c6a19d79b73a2702022b745abbfd6a0c79032b5be106f7bab76f20\": container with ID starting with ef1bd0dd32c6a19d79b73a2702022b745abbfd6a0c79032b5be106f7bab76f20 not found: ID does not exist" containerID="ef1bd0dd32c6a19d79b73a2702022b745abbfd6a0c79032b5be106f7bab76f20" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.399797 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef1bd0dd32c6a19d79b73a2702022b745abbfd6a0c79032b5be106f7bab76f20"} err="failed to get container status \"ef1bd0dd32c6a19d79b73a2702022b745abbfd6a0c79032b5be106f7bab76f20\": rpc error: code = NotFound desc = could not find container \"ef1bd0dd32c6a19d79b73a2702022b745abbfd6a0c79032b5be106f7bab76f20\": container with ID starting with ef1bd0dd32c6a19d79b73a2702022b745abbfd6a0c79032b5be106f7bab76f20 not found: ID does not exist" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.399814 4701 scope.go:117] "RemoveContainer" containerID="ca951ae8c9f9d30da31912ae7b86160d36457829c253fa3501fa092691235ea5" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.400152 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca951ae8c9f9d30da31912ae7b86160d36457829c253fa3501fa092691235ea5"} err="failed to get container status \"ca951ae8c9f9d30da31912ae7b86160d36457829c253fa3501fa092691235ea5\": rpc error: code = NotFound desc = could not find container \"ca951ae8c9f9d30da31912ae7b86160d36457829c253fa3501fa092691235ea5\": container with ID starting with ca951ae8c9f9d30da31912ae7b86160d36457829c253fa3501fa092691235ea5 not found: ID does not exist" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.400173 4701 scope.go:117] "RemoveContainer" containerID="ef1bd0dd32c6a19d79b73a2702022b745abbfd6a0c79032b5be106f7bab76f20" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 
19:22:06.400727 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef1bd0dd32c6a19d79b73a2702022b745abbfd6a0c79032b5be106f7bab76f20"} err="failed to get container status \"ef1bd0dd32c6a19d79b73a2702022b745abbfd6a0c79032b5be106f7bab76f20\": rpc error: code = NotFound desc = could not find container \"ef1bd0dd32c6a19d79b73a2702022b745abbfd6a0c79032b5be106f7bab76f20\": container with ID starting with ef1bd0dd32c6a19d79b73a2702022b745abbfd6a0c79032b5be106f7bab76f20 not found: ID does not exist" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.427683 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c4234702-4265-4b3a-ab18-9ba8d244ea33-config\") pod \"c4234702-4265-4b3a-ab18-9ba8d244ea33\" (UID: \"c4234702-4265-4b3a-ab18-9ba8d244ea33\") " Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.427761 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c4234702-4265-4b3a-ab18-9ba8d244ea33-ovsdbserver-sb\") pod \"c4234702-4265-4b3a-ab18-9ba8d244ea33\" (UID: \"c4234702-4265-4b3a-ab18-9ba8d244ea33\") " Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.427897 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c4234702-4265-4b3a-ab18-9ba8d244ea33-dns-svc\") pod \"c4234702-4265-4b3a-ab18-9ba8d244ea33\" (UID: \"c4234702-4265-4b3a-ab18-9ba8d244ea33\") " Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.427979 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c4234702-4265-4b3a-ab18-9ba8d244ea33-dns-swift-storage-0\") pod \"c4234702-4265-4b3a-ab18-9ba8d244ea33\" (UID: \"c4234702-4265-4b3a-ab18-9ba8d244ea33\") " Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.428069 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c4234702-4265-4b3a-ab18-9ba8d244ea33-ovsdbserver-nb\") pod \"c4234702-4265-4b3a-ab18-9ba8d244ea33\" (UID: \"c4234702-4265-4b3a-ab18-9ba8d244ea33\") " Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.428303 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hxmvq\" (UniqueName: \"kubernetes.io/projected/c4234702-4265-4b3a-ab18-9ba8d244ea33-kube-api-access-hxmvq\") pod \"c4234702-4265-4b3a-ab18-9ba8d244ea33\" (UID: \"c4234702-4265-4b3a-ab18-9ba8d244ea33\") " Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.428737 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66b0eb91-a81c-4a68-8e9d-a1042d709e80-config-data\") pod \"nova-metadata-0\" (UID: \"66b0eb91-a81c-4a68-8e9d-a1042d709e80\") " pod="openstack/nova-metadata-0" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.428790 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/66b0eb91-a81c-4a68-8e9d-a1042d709e80-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"66b0eb91-a81c-4a68-8e9d-a1042d709e80\") " pod="openstack/nova-metadata-0" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.428837 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-p7q5m\" (UniqueName: \"kubernetes.io/projected/66b0eb91-a81c-4a68-8e9d-a1042d709e80-kube-api-access-p7q5m\") pod \"nova-metadata-0\" (UID: \"66b0eb91-a81c-4a68-8e9d-a1042d709e80\") " pod="openstack/nova-metadata-0" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.428872 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/66b0eb91-a81c-4a68-8e9d-a1042d709e80-logs\") pod \"nova-metadata-0\" (UID: \"66b0eb91-a81c-4a68-8e9d-a1042d709e80\") " pod="openstack/nova-metadata-0" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.428935 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66b0eb91-a81c-4a68-8e9d-a1042d709e80-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"66b0eb91-a81c-4a68-8e9d-a1042d709e80\") " pod="openstack/nova-metadata-0" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.438839 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/66b0eb91-a81c-4a68-8e9d-a1042d709e80-logs\") pod \"nova-metadata-0\" (UID: \"66b0eb91-a81c-4a68-8e9d-a1042d709e80\") " pod="openstack/nova-metadata-0" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.449884 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c4234702-4265-4b3a-ab18-9ba8d244ea33-kube-api-access-hxmvq" (OuterVolumeSpecName: "kube-api-access-hxmvq") pod "c4234702-4265-4b3a-ab18-9ba8d244ea33" (UID: "c4234702-4265-4b3a-ab18-9ba8d244ea33"). InnerVolumeSpecName "kube-api-access-hxmvq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.450693 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66b0eb91-a81c-4a68-8e9d-a1042d709e80-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"66b0eb91-a81c-4a68-8e9d-a1042d709e80\") " pod="openstack/nova-metadata-0" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.456680 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66b0eb91-a81c-4a68-8e9d-a1042d709e80-config-data\") pod \"nova-metadata-0\" (UID: \"66b0eb91-a81c-4a68-8e9d-a1042d709e80\") " pod="openstack/nova-metadata-0" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.457884 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/66b0eb91-a81c-4a68-8e9d-a1042d709e80-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"66b0eb91-a81c-4a68-8e9d-a1042d709e80\") " pod="openstack/nova-metadata-0" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.476048 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p7q5m\" (UniqueName: \"kubernetes.io/projected/66b0eb91-a81c-4a68-8e9d-a1042d709e80-kube-api-access-p7q5m\") pod \"nova-metadata-0\" (UID: \"66b0eb91-a81c-4a68-8e9d-a1042d709e80\") " pod="openstack/nova-metadata-0" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.528390 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c4234702-4265-4b3a-ab18-9ba8d244ea33-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "c4234702-4265-4b3a-ab18-9ba8d244ea33" (UID: "c4234702-4265-4b3a-ab18-9ba8d244ea33"). 
InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.537757 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hxmvq\" (UniqueName: \"kubernetes.io/projected/c4234702-4265-4b3a-ab18-9ba8d244ea33-kube-api-access-hxmvq\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.537781 4701 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c4234702-4265-4b3a-ab18-9ba8d244ea33-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.538522 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c4234702-4265-4b3a-ab18-9ba8d244ea33-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c4234702-4265-4b3a-ab18-9ba8d244ea33" (UID: "c4234702-4265-4b3a-ab18-9ba8d244ea33"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.542620 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c4234702-4265-4b3a-ab18-9ba8d244ea33-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "c4234702-4265-4b3a-ab18-9ba8d244ea33" (UID: "c4234702-4265-4b3a-ab18-9ba8d244ea33"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.545753 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c4234702-4265-4b3a-ab18-9ba8d244ea33-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c4234702-4265-4b3a-ab18-9ba8d244ea33" (UID: "c4234702-4265-4b3a-ab18-9ba8d244ea33"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.562322 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c4234702-4265-4b3a-ab18-9ba8d244ea33-config" (OuterVolumeSpecName: "config") pod "c4234702-4265-4b3a-ab18-9ba8d244ea33" (UID: "c4234702-4265-4b3a-ab18-9ba8d244ea33"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.639803 4701 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c4234702-4265-4b3a-ab18-9ba8d244ea33-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.639841 4701 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c4234702-4265-4b3a-ab18-9ba8d244ea33-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.639853 4701 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c4234702-4265-4b3a-ab18-9ba8d244ea33-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.639871 4701 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c4234702-4265-4b3a-ab18-9ba8d244ea33-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:06 crc kubenswrapper[4701]: I1121 19:22:06.688322 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 21 19:22:07 crc kubenswrapper[4701]: I1121 19:22:07.213118 4701 generic.go:334] "Generic (PLEG): container finished" podID="4933c0b9-8f15-4b88-90ea-7fb26f2f4d66" containerID="5ba58a1d76cee00b4c33d1041d44aefbe1a1a858ab8ef46e4371df68016be7cc" exitCode=0 Nov 21 19:22:07 crc kubenswrapper[4701]: I1121 19:22:07.213314 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-r4vt6" event={"ID":"4933c0b9-8f15-4b88-90ea-7fb26f2f4d66","Type":"ContainerDied","Data":"5ba58a1d76cee00b4c33d1041d44aefbe1a1a858ab8ef46e4371df68016be7cc"} Nov 21 19:22:07 crc kubenswrapper[4701]: I1121 19:22:07.217996 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 19:22:07 crc kubenswrapper[4701]: I1121 19:22:07.223855 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-679559bbc5-wxbzl" event={"ID":"c4234702-4265-4b3a-ab18-9ba8d244ea33","Type":"ContainerDied","Data":"e8c70a12416b5309e85170969e76782e13f1d635e04f443ff47c22f173758277"} Nov 21 19:22:07 crc kubenswrapper[4701]: I1121 19:22:07.223905 4701 scope.go:117] "RemoveContainer" containerID="c9e99e980d9315c400d62de2d04b8c8d3eb4e81d8e2179a33e5b4c9591156de5" Nov 21 19:22:07 crc kubenswrapper[4701]: I1121 19:22:07.224108 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-679559bbc5-wxbzl" Nov 21 19:22:07 crc kubenswrapper[4701]: I1121 19:22:07.281871 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-679559bbc5-wxbzl"] Nov 21 19:22:07 crc kubenswrapper[4701]: I1121 19:22:07.284365 4701 scope.go:117] "RemoveContainer" containerID="55e6ab3dcb0ef36e836fd94d76a324c64c2b2a989d0e9044abb1fe814f2ee7ff" Nov 21 19:22:07 crc kubenswrapper[4701]: I1121 19:22:07.309606 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-679559bbc5-wxbzl"] Nov 21 19:22:07 crc kubenswrapper[4701]: I1121 19:22:07.965192 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="855a9455-82eb-49c8-8e99-79cd0e72bb96" path="/var/lib/kubelet/pods/855a9455-82eb-49c8-8e99-79cd0e72bb96/volumes" Nov 21 19:22:07 crc kubenswrapper[4701]: I1121 19:22:07.967282 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c4234702-4265-4b3a-ab18-9ba8d244ea33" path="/var/lib/kubelet/pods/c4234702-4265-4b3a-ab18-9ba8d244ea33/volumes" Nov 21 19:22:07 crc kubenswrapper[4701]: I1121 19:22:07.988574 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.095881 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b13d184e-6b2c-4f6b-9466-790ba15296cd-logs\") pod \"b13d184e-6b2c-4f6b-9466-790ba15296cd\" (UID: \"b13d184e-6b2c-4f6b-9466-790ba15296cd\") " Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.095962 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b13d184e-6b2c-4f6b-9466-790ba15296cd-combined-ca-bundle\") pod \"b13d184e-6b2c-4f6b-9466-790ba15296cd\" (UID: \"b13d184e-6b2c-4f6b-9466-790ba15296cd\") " Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.096081 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fl99z\" (UniqueName: \"kubernetes.io/projected/b13d184e-6b2c-4f6b-9466-790ba15296cd-kube-api-access-fl99z\") pod \"b13d184e-6b2c-4f6b-9466-790ba15296cd\" (UID: \"b13d184e-6b2c-4f6b-9466-790ba15296cd\") " Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.096152 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b13d184e-6b2c-4f6b-9466-790ba15296cd-config-data\") pod \"b13d184e-6b2c-4f6b-9466-790ba15296cd\" (UID: \"b13d184e-6b2c-4f6b-9466-790ba15296cd\") " Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.096980 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b13d184e-6b2c-4f6b-9466-790ba15296cd-logs" (OuterVolumeSpecName: "logs") pod "b13d184e-6b2c-4f6b-9466-790ba15296cd" (UID: "b13d184e-6b2c-4f6b-9466-790ba15296cd"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.097482 4701 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b13d184e-6b2c-4f6b-9466-790ba15296cd-logs\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.101475 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b13d184e-6b2c-4f6b-9466-790ba15296cd-kube-api-access-fl99z" (OuterVolumeSpecName: "kube-api-access-fl99z") pod "b13d184e-6b2c-4f6b-9466-790ba15296cd" (UID: "b13d184e-6b2c-4f6b-9466-790ba15296cd"). InnerVolumeSpecName "kube-api-access-fl99z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.133528 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b13d184e-6b2c-4f6b-9466-790ba15296cd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b13d184e-6b2c-4f6b-9466-790ba15296cd" (UID: "b13d184e-6b2c-4f6b-9466-790ba15296cd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.138980 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b13d184e-6b2c-4f6b-9466-790ba15296cd-config-data" (OuterVolumeSpecName: "config-data") pod "b13d184e-6b2c-4f6b-9466-790ba15296cd" (UID: "b13d184e-6b2c-4f6b-9466-790ba15296cd"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.199347 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b13d184e-6b2c-4f6b-9466-790ba15296cd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.199407 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fl99z\" (UniqueName: \"kubernetes.io/projected/b13d184e-6b2c-4f6b-9466-790ba15296cd-kube-api-access-fl99z\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.199452 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b13d184e-6b2c-4f6b-9466-790ba15296cd-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.267454 4701 generic.go:334] "Generic (PLEG): container finished" podID="b13d184e-6b2c-4f6b-9466-790ba15296cd" containerID="8d55fcdda35709a32f611036657f5e9d553b74a0fc3a035fe38d06e88ba7c898" exitCode=0 Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.267534 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b13d184e-6b2c-4f6b-9466-790ba15296cd","Type":"ContainerDied","Data":"8d55fcdda35709a32f611036657f5e9d553b74a0fc3a035fe38d06e88ba7c898"} Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.267568 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b13d184e-6b2c-4f6b-9466-790ba15296cd","Type":"ContainerDied","Data":"34c7ac4b46625a198068fd3151f743ea2966c7595b203bcf229afb1b83134502"} Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.267592 4701 scope.go:117] "RemoveContainer" containerID="8d55fcdda35709a32f611036657f5e9d553b74a0fc3a035fe38d06e88ba7c898" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.267754 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.278035 4701 generic.go:334] "Generic (PLEG): container finished" podID="53f3873a-a20c-4245-a0a3-a34bd8c5b6fe" containerID="22a14fc572829240174fd2695bfe2ce8e04d886a966dda8f14aab533687bb778" exitCode=0 Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.278085 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"53f3873a-a20c-4245-a0a3-a34bd8c5b6fe","Type":"ContainerDied","Data":"22a14fc572829240174fd2695bfe2ce8e04d886a966dda8f14aab533687bb778"} Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.284417 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"66b0eb91-a81c-4a68-8e9d-a1042d709e80","Type":"ContainerStarted","Data":"4336a6cf3798ec22e54d0ce87ce0f7a7e04b646adceb0048a6ffd573f71f4f4a"} Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.284510 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"66b0eb91-a81c-4a68-8e9d-a1042d709e80","Type":"ContainerStarted","Data":"d7be4db370d7b69d26fc12236bf4a82631413dfee073cf0cd71ebc33eb4d3b6a"} Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.284527 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"66b0eb91-a81c-4a68-8e9d-a1042d709e80","Type":"ContainerStarted","Data":"e3ca9d4d7a720b20b5f9744318d11b7eaae17fff73a46b7e2b0a33f8ae26f4b1"} Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.312963 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.312942586 podStartE2EDuration="2.312942586s" podCreationTimestamp="2025-11-21 19:22:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:22:08.305726253 +0000 UTC m=+1219.090866280" watchObservedRunningTime="2025-11-21 19:22:08.312942586 +0000 UTC m=+1219.098082613" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.413166 4701 scope.go:117] "RemoveContainer" containerID="d1d7b61877243190a828bc5214a38c3857e62eb6f56e259f8ac738cc1cecc640" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.414718 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.423292 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.444042 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.458478 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 21 19:22:08 crc kubenswrapper[4701]: E1121 19:22:08.458945 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4234702-4265-4b3a-ab18-9ba8d244ea33" containerName="dnsmasq-dns" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.458959 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4234702-4265-4b3a-ab18-9ba8d244ea33" containerName="dnsmasq-dns" Nov 21 19:22:08 crc kubenswrapper[4701]: E1121 19:22:08.458976 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b13d184e-6b2c-4f6b-9466-790ba15296cd" containerName="nova-api-log" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.458983 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="b13d184e-6b2c-4f6b-9466-790ba15296cd" containerName="nova-api-log" Nov 21 19:22:08 crc kubenswrapper[4701]: E1121 19:22:08.459009 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4234702-4265-4b3a-ab18-9ba8d244ea33" containerName="init" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.459017 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4234702-4265-4b3a-ab18-9ba8d244ea33" containerName="init" Nov 21 19:22:08 crc kubenswrapper[4701]: E1121 19:22:08.459031 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53f3873a-a20c-4245-a0a3-a34bd8c5b6fe" containerName="nova-scheduler-scheduler" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.459037 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="53f3873a-a20c-4245-a0a3-a34bd8c5b6fe" containerName="nova-scheduler-scheduler" Nov 21 19:22:08 crc kubenswrapper[4701]: E1121 19:22:08.459082 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b13d184e-6b2c-4f6b-9466-790ba15296cd" containerName="nova-api-api" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.459089 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="b13d184e-6b2c-4f6b-9466-790ba15296cd" containerName="nova-api-api" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.465317 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="53f3873a-a20c-4245-a0a3-a34bd8c5b6fe" containerName="nova-scheduler-scheduler" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.465355 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="b13d184e-6b2c-4f6b-9466-790ba15296cd" containerName="nova-api-api" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.465372 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="b13d184e-6b2c-4f6b-9466-790ba15296cd" containerName="nova-api-log" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.465390 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4234702-4265-4b3a-ab18-9ba8d244ea33" containerName="dnsmasq-dns" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.466539 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.471063 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.503066 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.507826 4701 scope.go:117] "RemoveContainer" containerID="8d55fcdda35709a32f611036657f5e9d553b74a0fc3a035fe38d06e88ba7c898" Nov 21 19:22:08 crc kubenswrapper[4701]: E1121 19:22:08.509469 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8d55fcdda35709a32f611036657f5e9d553b74a0fc3a035fe38d06e88ba7c898\": container with ID starting with 8d55fcdda35709a32f611036657f5e9d553b74a0fc3a035fe38d06e88ba7c898 not found: ID does not exist" containerID="8d55fcdda35709a32f611036657f5e9d553b74a0fc3a035fe38d06e88ba7c898" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.509517 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d55fcdda35709a32f611036657f5e9d553b74a0fc3a035fe38d06e88ba7c898"} err="failed to get container status \"8d55fcdda35709a32f611036657f5e9d553b74a0fc3a035fe38d06e88ba7c898\": rpc error: code = NotFound desc = could not find container \"8d55fcdda35709a32f611036657f5e9d553b74a0fc3a035fe38d06e88ba7c898\": container with ID starting with 8d55fcdda35709a32f611036657f5e9d553b74a0fc3a035fe38d06e88ba7c898 not found: ID does not exist" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.509548 4701 scope.go:117] "RemoveContainer" containerID="d1d7b61877243190a828bc5214a38c3857e62eb6f56e259f8ac738cc1cecc640" Nov 21 19:22:08 crc kubenswrapper[4701]: E1121 19:22:08.509960 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d1d7b61877243190a828bc5214a38c3857e62eb6f56e259f8ac738cc1cecc640\": container with ID starting with d1d7b61877243190a828bc5214a38c3857e62eb6f56e259f8ac738cc1cecc640 not found: ID does not exist" containerID="d1d7b61877243190a828bc5214a38c3857e62eb6f56e259f8ac738cc1cecc640" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.509993 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d1d7b61877243190a828bc5214a38c3857e62eb6f56e259f8ac738cc1cecc640"} err="failed to get container status \"d1d7b61877243190a828bc5214a38c3857e62eb6f56e259f8ac738cc1cecc640\": rpc error: code = NotFound desc = could not find container \"d1d7b61877243190a828bc5214a38c3857e62eb6f56e259f8ac738cc1cecc640\": container with ID starting with d1d7b61877243190a828bc5214a38c3857e62eb6f56e259f8ac738cc1cecc640 not found: ID does not exist" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.511986 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53f3873a-a20c-4245-a0a3-a34bd8c5b6fe-config-data\") pod \"53f3873a-a20c-4245-a0a3-a34bd8c5b6fe\" (UID: \"53f3873a-a20c-4245-a0a3-a34bd8c5b6fe\") " Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.512032 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c7l8q\" (UniqueName: \"kubernetes.io/projected/53f3873a-a20c-4245-a0a3-a34bd8c5b6fe-kube-api-access-c7l8q\") pod \"53f3873a-a20c-4245-a0a3-a34bd8c5b6fe\" (UID: \"53f3873a-a20c-4245-a0a3-a34bd8c5b6fe\") " Nov 
21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.512055 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53f3873a-a20c-4245-a0a3-a34bd8c5b6fe-combined-ca-bundle\") pod \"53f3873a-a20c-4245-a0a3-a34bd8c5b6fe\" (UID: \"53f3873a-a20c-4245-a0a3-a34bd8c5b6fe\") " Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.512705 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17a65da5-6507-473c-9ca1-2df317f54d15-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"17a65da5-6507-473c-9ca1-2df317f54d15\") " pod="openstack/nova-api-0" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.512793 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nvtpl\" (UniqueName: \"kubernetes.io/projected/17a65da5-6507-473c-9ca1-2df317f54d15-kube-api-access-nvtpl\") pod \"nova-api-0\" (UID: \"17a65da5-6507-473c-9ca1-2df317f54d15\") " pod="openstack/nova-api-0" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.512885 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/17a65da5-6507-473c-9ca1-2df317f54d15-logs\") pod \"nova-api-0\" (UID: \"17a65da5-6507-473c-9ca1-2df317f54d15\") " pod="openstack/nova-api-0" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.512961 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17a65da5-6507-473c-9ca1-2df317f54d15-config-data\") pod \"nova-api-0\" (UID: \"17a65da5-6507-473c-9ca1-2df317f54d15\") " pod="openstack/nova-api-0" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.524756 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/53f3873a-a20c-4245-a0a3-a34bd8c5b6fe-kube-api-access-c7l8q" (OuterVolumeSpecName: "kube-api-access-c7l8q") pod "53f3873a-a20c-4245-a0a3-a34bd8c5b6fe" (UID: "53f3873a-a20c-4245-a0a3-a34bd8c5b6fe"). InnerVolumeSpecName "kube-api-access-c7l8q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.550195 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53f3873a-a20c-4245-a0a3-a34bd8c5b6fe-config-data" (OuterVolumeSpecName: "config-data") pod "53f3873a-a20c-4245-a0a3-a34bd8c5b6fe" (UID: "53f3873a-a20c-4245-a0a3-a34bd8c5b6fe"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.573315 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53f3873a-a20c-4245-a0a3-a34bd8c5b6fe-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "53f3873a-a20c-4245-a0a3-a34bd8c5b6fe" (UID: "53f3873a-a20c-4245-a0a3-a34bd8c5b6fe"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.614844 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17a65da5-6507-473c-9ca1-2df317f54d15-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"17a65da5-6507-473c-9ca1-2df317f54d15\") " pod="openstack/nova-api-0" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.614947 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nvtpl\" (UniqueName: \"kubernetes.io/projected/17a65da5-6507-473c-9ca1-2df317f54d15-kube-api-access-nvtpl\") pod \"nova-api-0\" (UID: \"17a65da5-6507-473c-9ca1-2df317f54d15\") " pod="openstack/nova-api-0" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.615004 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/17a65da5-6507-473c-9ca1-2df317f54d15-logs\") pod \"nova-api-0\" (UID: \"17a65da5-6507-473c-9ca1-2df317f54d15\") " pod="openstack/nova-api-0" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.615053 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17a65da5-6507-473c-9ca1-2df317f54d15-config-data\") pod \"nova-api-0\" (UID: \"17a65da5-6507-473c-9ca1-2df317f54d15\") " pod="openstack/nova-api-0" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.615159 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53f3873a-a20c-4245-a0a3-a34bd8c5b6fe-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.615178 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53f3873a-a20c-4245-a0a3-a34bd8c5b6fe-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.615187 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c7l8q\" (UniqueName: \"kubernetes.io/projected/53f3873a-a20c-4245-a0a3-a34bd8c5b6fe-kube-api-access-c7l8q\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.615861 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/17a65da5-6507-473c-9ca1-2df317f54d15-logs\") pod \"nova-api-0\" (UID: \"17a65da5-6507-473c-9ca1-2df317f54d15\") " pod="openstack/nova-api-0" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.620751 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17a65da5-6507-473c-9ca1-2df317f54d15-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"17a65da5-6507-473c-9ca1-2df317f54d15\") " pod="openstack/nova-api-0" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.626162 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17a65da5-6507-473c-9ca1-2df317f54d15-config-data\") pod \"nova-api-0\" (UID: \"17a65da5-6507-473c-9ca1-2df317f54d15\") " pod="openstack/nova-api-0" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.632477 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nvtpl\" (UniqueName: \"kubernetes.io/projected/17a65da5-6507-473c-9ca1-2df317f54d15-kube-api-access-nvtpl\") pod 
\"nova-api-0\" (UID: \"17a65da5-6507-473c-9ca1-2df317f54d15\") " pod="openstack/nova-api-0" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.680039 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-r4vt6" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.716828 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ns9ql\" (UniqueName: \"kubernetes.io/projected/4933c0b9-8f15-4b88-90ea-7fb26f2f4d66-kube-api-access-ns9ql\") pod \"4933c0b9-8f15-4b88-90ea-7fb26f2f4d66\" (UID: \"4933c0b9-8f15-4b88-90ea-7fb26f2f4d66\") " Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.716923 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4933c0b9-8f15-4b88-90ea-7fb26f2f4d66-combined-ca-bundle\") pod \"4933c0b9-8f15-4b88-90ea-7fb26f2f4d66\" (UID: \"4933c0b9-8f15-4b88-90ea-7fb26f2f4d66\") " Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.717059 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4933c0b9-8f15-4b88-90ea-7fb26f2f4d66-config-data\") pod \"4933c0b9-8f15-4b88-90ea-7fb26f2f4d66\" (UID: \"4933c0b9-8f15-4b88-90ea-7fb26f2f4d66\") " Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.717150 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4933c0b9-8f15-4b88-90ea-7fb26f2f4d66-scripts\") pod \"4933c0b9-8f15-4b88-90ea-7fb26f2f4d66\" (UID: \"4933c0b9-8f15-4b88-90ea-7fb26f2f4d66\") " Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.722420 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4933c0b9-8f15-4b88-90ea-7fb26f2f4d66-scripts" (OuterVolumeSpecName: "scripts") pod "4933c0b9-8f15-4b88-90ea-7fb26f2f4d66" (UID: "4933c0b9-8f15-4b88-90ea-7fb26f2f4d66"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.728481 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4933c0b9-8f15-4b88-90ea-7fb26f2f4d66-kube-api-access-ns9ql" (OuterVolumeSpecName: "kube-api-access-ns9ql") pod "4933c0b9-8f15-4b88-90ea-7fb26f2f4d66" (UID: "4933c0b9-8f15-4b88-90ea-7fb26f2f4d66"). InnerVolumeSpecName "kube-api-access-ns9ql". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.746326 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4933c0b9-8f15-4b88-90ea-7fb26f2f4d66-config-data" (OuterVolumeSpecName: "config-data") pod "4933c0b9-8f15-4b88-90ea-7fb26f2f4d66" (UID: "4933c0b9-8f15-4b88-90ea-7fb26f2f4d66"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.748347 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4933c0b9-8f15-4b88-90ea-7fb26f2f4d66-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4933c0b9-8f15-4b88-90ea-7fb26f2f4d66" (UID: "4933c0b9-8f15-4b88-90ea-7fb26f2f4d66"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.790597 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.820541 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ns9ql\" (UniqueName: \"kubernetes.io/projected/4933c0b9-8f15-4b88-90ea-7fb26f2f4d66-kube-api-access-ns9ql\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.820573 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4933c0b9-8f15-4b88-90ea-7fb26f2f4d66-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.820587 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4933c0b9-8f15-4b88-90ea-7fb26f2f4d66-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:08 crc kubenswrapper[4701]: I1121 19:22:08.820597 4701 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4933c0b9-8f15-4b88-90ea-7fb26f2f4d66-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.296232 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-r4vt6" Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.296251 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-r4vt6" event={"ID":"4933c0b9-8f15-4b88-90ea-7fb26f2f4d66","Type":"ContainerDied","Data":"de8c687b42947b453221bfe75f93a3960e55339e4562dccd90d92f82b517b930"} Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.297127 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="de8c687b42947b453221bfe75f93a3960e55339e4562dccd90d92f82b517b930" Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.299798 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"53f3873a-a20c-4245-a0a3-a34bd8c5b6fe","Type":"ContainerDied","Data":"d6c88c698b228133aefc7176793bb0152c87548204bb712a924e5d3c1133af64"} Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.299864 4701 scope.go:117] "RemoveContainer" containerID="22a14fc572829240174fd2695bfe2ce8e04d886a966dda8f14aab533687bb778" Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.300001 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.365461 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.381431 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 21 19:22:09 crc kubenswrapper[4701]: E1121 19:22:09.382153 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4933c0b9-8f15-4b88-90ea-7fb26f2f4d66" containerName="nova-cell1-conductor-db-sync" Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.382189 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="4933c0b9-8f15-4b88-90ea-7fb26f2f4d66" containerName="nova-cell1-conductor-db-sync" Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.382609 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="4933c0b9-8f15-4b88-90ea-7fb26f2f4d66" containerName="nova-cell1-conductor-db-sync" Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.383701 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.388853 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.437619 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.440459 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rrlmz\" (UniqueName: \"kubernetes.io/projected/beda6918-af8f-42ab-8599-44f3dc52229f-kube-api-access-rrlmz\") pod \"nova-cell1-conductor-0\" (UID: \"beda6918-af8f-42ab-8599-44f3dc52229f\") " pod="openstack/nova-cell1-conductor-0" Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.440640 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/beda6918-af8f-42ab-8599-44f3dc52229f-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"beda6918-af8f-42ab-8599-44f3dc52229f\") " pod="openstack/nova-cell1-conductor-0" Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.440733 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/beda6918-af8f-42ab-8599-44f3dc52229f-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"beda6918-af8f-42ab-8599-44f3dc52229f\") " pod="openstack/nova-cell1-conductor-0" Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.460638 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.472095 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.490435 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.492244 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.495731 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.510776 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.543911 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wxrxq\" (UniqueName: \"kubernetes.io/projected/dfec6afb-55e4-4efa-8e87-4f68937b2672-kube-api-access-wxrxq\") pod \"nova-scheduler-0\" (UID: \"dfec6afb-55e4-4efa-8e87-4f68937b2672\") " pod="openstack/nova-scheduler-0" Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.544125 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/beda6918-af8f-42ab-8599-44f3dc52229f-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"beda6918-af8f-42ab-8599-44f3dc52229f\") " pod="openstack/nova-cell1-conductor-0" Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.544241 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/beda6918-af8f-42ab-8599-44f3dc52229f-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"beda6918-af8f-42ab-8599-44f3dc52229f\") " pod="openstack/nova-cell1-conductor-0" Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.544380 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dfec6afb-55e4-4efa-8e87-4f68937b2672-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"dfec6afb-55e4-4efa-8e87-4f68937b2672\") " pod="openstack/nova-scheduler-0" Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.544544 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dfec6afb-55e4-4efa-8e87-4f68937b2672-config-data\") pod \"nova-scheduler-0\" (UID: \"dfec6afb-55e4-4efa-8e87-4f68937b2672\") " pod="openstack/nova-scheduler-0" Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.544634 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rrlmz\" (UniqueName: \"kubernetes.io/projected/beda6918-af8f-42ab-8599-44f3dc52229f-kube-api-access-rrlmz\") pod \"nova-cell1-conductor-0\" (UID: \"beda6918-af8f-42ab-8599-44f3dc52229f\") " pod="openstack/nova-cell1-conductor-0" Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.552054 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/beda6918-af8f-42ab-8599-44f3dc52229f-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"beda6918-af8f-42ab-8599-44f3dc52229f\") " pod="openstack/nova-cell1-conductor-0" Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.554903 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/beda6918-af8f-42ab-8599-44f3dc52229f-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"beda6918-af8f-42ab-8599-44f3dc52229f\") " pod="openstack/nova-cell1-conductor-0" Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.563207 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-rrlmz\" (UniqueName: \"kubernetes.io/projected/beda6918-af8f-42ab-8599-44f3dc52229f-kube-api-access-rrlmz\") pod \"nova-cell1-conductor-0\" (UID: \"beda6918-af8f-42ab-8599-44f3dc52229f\") " pod="openstack/nova-cell1-conductor-0" Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.651759 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dfec6afb-55e4-4efa-8e87-4f68937b2672-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"dfec6afb-55e4-4efa-8e87-4f68937b2672\") " pod="openstack/nova-scheduler-0" Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.654999 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dfec6afb-55e4-4efa-8e87-4f68937b2672-config-data\") pod \"nova-scheduler-0\" (UID: \"dfec6afb-55e4-4efa-8e87-4f68937b2672\") " pod="openstack/nova-scheduler-0" Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.655795 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wxrxq\" (UniqueName: \"kubernetes.io/projected/dfec6afb-55e4-4efa-8e87-4f68937b2672-kube-api-access-wxrxq\") pod \"nova-scheduler-0\" (UID: \"dfec6afb-55e4-4efa-8e87-4f68937b2672\") " pod="openstack/nova-scheduler-0" Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.656421 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dfec6afb-55e4-4efa-8e87-4f68937b2672-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"dfec6afb-55e4-4efa-8e87-4f68937b2672\") " pod="openstack/nova-scheduler-0" Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.658453 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dfec6afb-55e4-4efa-8e87-4f68937b2672-config-data\") pod \"nova-scheduler-0\" (UID: \"dfec6afb-55e4-4efa-8e87-4f68937b2672\") " pod="openstack/nova-scheduler-0" Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.671697 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wxrxq\" (UniqueName: \"kubernetes.io/projected/dfec6afb-55e4-4efa-8e87-4f68937b2672-kube-api-access-wxrxq\") pod \"nova-scheduler-0\" (UID: \"dfec6afb-55e4-4efa-8e87-4f68937b2672\") " pod="openstack/nova-scheduler-0" Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.748928 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.837165 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.974618 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="53f3873a-a20c-4245-a0a3-a34bd8c5b6fe" path="/var/lib/kubelet/pods/53f3873a-a20c-4245-a0a3-a34bd8c5b6fe/volumes" Nov 21 19:22:09 crc kubenswrapper[4701]: I1121 19:22:09.976354 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b13d184e-6b2c-4f6b-9466-790ba15296cd" path="/var/lib/kubelet/pods/b13d184e-6b2c-4f6b-9466-790ba15296cd/volumes" Nov 21 19:22:10 crc kubenswrapper[4701]: I1121 19:22:10.326652 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"17a65da5-6507-473c-9ca1-2df317f54d15","Type":"ContainerStarted","Data":"720a2d8a54b520e53d21213b17a3ee1edd60191cc789e49881b207603e869427"} Nov 21 19:22:10 crc kubenswrapper[4701]: I1121 19:22:10.327064 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"17a65da5-6507-473c-9ca1-2df317f54d15","Type":"ContainerStarted","Data":"c278ba511fced5bf6402c6b0cf373fef9001ce2954ed0f78a49358eaaa52b927"} Nov 21 19:22:10 crc kubenswrapper[4701]: I1121 19:22:10.327091 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"17a65da5-6507-473c-9ca1-2df317f54d15","Type":"ContainerStarted","Data":"adca9335f4e22e51d3ca348df97b63c24c6b0b55c0dfb3dd03e5487595561239"} Nov 21 19:22:10 crc kubenswrapper[4701]: I1121 19:22:10.333829 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 21 19:22:10 crc kubenswrapper[4701]: I1121 19:22:10.350996 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.350967476 podStartE2EDuration="2.350967476s" podCreationTimestamp="2025-11-21 19:22:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:22:10.349551227 +0000 UTC m=+1221.134691294" watchObservedRunningTime="2025-11-21 19:22:10.350967476 +0000 UTC m=+1221.136107513" Nov 21 19:22:10 crc kubenswrapper[4701]: I1121 19:22:10.453036 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 19:22:10 crc kubenswrapper[4701]: I1121 19:22:10.707016 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Nov 21 19:22:10 crc kubenswrapper[4701]: I1121 19:22:10.745174 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-decision-engine-0" Nov 21 19:22:11 crc kubenswrapper[4701]: I1121 19:22:11.367594 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"beda6918-af8f-42ab-8599-44f3dc52229f","Type":"ContainerStarted","Data":"7af78eb2c4ce30bd437a572689ba241966b6106609df592ae153f4f858a1f6f0"} Nov 21 19:22:11 crc kubenswrapper[4701]: I1121 19:22:11.368079 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"beda6918-af8f-42ab-8599-44f3dc52229f","Type":"ContainerStarted","Data":"0e4641407ec6edf59d1305c60ac7b70c9e2d5dd1289048c4a82a8ecb7677227a"} Nov 21 19:22:11 crc kubenswrapper[4701]: I1121 19:22:11.368865 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 21 19:22:11 crc kubenswrapper[4701]: I1121 19:22:11.376280 4701 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"dfec6afb-55e4-4efa-8e87-4f68937b2672","Type":"ContainerStarted","Data":"217c7063a1e554974ad75c90ab018bb48a6c866b14bde9fd66e948fa92c36238"} Nov 21 19:22:11 crc kubenswrapper[4701]: I1121 19:22:11.376312 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"dfec6afb-55e4-4efa-8e87-4f68937b2672","Type":"ContainerStarted","Data":"74ce980ba75b948c25143a2b1ad2b7c6ddd7d08054844b3c74bf034a92fc9ec9"} Nov 21 19:22:11 crc kubenswrapper[4701]: I1121 19:22:11.376919 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0" Nov 21 19:22:11 crc kubenswrapper[4701]: I1121 19:22:11.399168 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.399143977 podStartE2EDuration="2.399143977s" podCreationTimestamp="2025-11-21 19:22:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:22:11.398201082 +0000 UTC m=+1222.183341109" watchObservedRunningTime="2025-11-21 19:22:11.399143977 +0000 UTC m=+1222.184284004" Nov 21 19:22:11 crc kubenswrapper[4701]: I1121 19:22:11.419231 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-decision-engine-0" Nov 21 19:22:11 crc kubenswrapper[4701]: I1121 19:22:11.455388 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.4553655340000002 podStartE2EDuration="2.455365534s" podCreationTimestamp="2025-11-21 19:22:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:22:11.434452674 +0000 UTC m=+1222.219592701" watchObservedRunningTime="2025-11-21 19:22:11.455365534 +0000 UTC m=+1222.240505561" Nov 21 19:22:11 crc kubenswrapper[4701]: I1121 19:22:11.689953 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 21 19:22:11 crc kubenswrapper[4701]: I1121 19:22:11.691623 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 21 19:22:14 crc kubenswrapper[4701]: I1121 19:22:14.861700 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 21 19:22:16 crc kubenswrapper[4701]: I1121 19:22:16.689694 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 21 19:22:16 crc kubenswrapper[4701]: I1121 19:22:16.690053 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 21 19:22:17 crc kubenswrapper[4701]: I1121 19:22:17.707485 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="66b0eb91-a81c-4a68-8e9d-a1042d709e80" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.210:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 21 19:22:17 crc kubenswrapper[4701]: I1121 19:22:17.707580 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="66b0eb91-a81c-4a68-8e9d-a1042d709e80" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.210:8775/\": net/http: request canceled (Client.Timeout exceeded 
while awaiting headers)" Nov 21 19:22:18 crc kubenswrapper[4701]: I1121 19:22:18.791892 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 21 19:22:18 crc kubenswrapper[4701]: I1121 19:22:18.792413 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 21 19:22:19 crc kubenswrapper[4701]: I1121 19:22:19.806247 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 21 19:22:19 crc kubenswrapper[4701]: I1121 19:22:19.837526 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 21 19:22:19 crc kubenswrapper[4701]: I1121 19:22:19.873731 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="17a65da5-6507-473c-9ca1-2df317f54d15" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.211:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 21 19:22:19 crc kubenswrapper[4701]: I1121 19:22:19.875635 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="17a65da5-6507-473c-9ca1-2df317f54d15" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.211:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 21 19:22:19 crc kubenswrapper[4701]: I1121 19:22:19.900305 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 21 19:22:20 crc kubenswrapper[4701]: I1121 19:22:20.558002 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 21 19:22:26 crc kubenswrapper[4701]: I1121 19:22:26.699539 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 21 19:22:26 crc kubenswrapper[4701]: I1121 19:22:26.700628 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 21 19:22:26 crc kubenswrapper[4701]: I1121 19:22:26.710894 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 21 19:22:26 crc kubenswrapper[4701]: I1121 19:22:26.711151 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.364940 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.459155 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/983d33ec-2246-4db3-b4d6-54cca3235d73-combined-ca-bundle\") pod \"983d33ec-2246-4db3-b4d6-54cca3235d73\" (UID: \"983d33ec-2246-4db3-b4d6-54cca3235d73\") " Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.459452 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n9k8c\" (UniqueName: \"kubernetes.io/projected/983d33ec-2246-4db3-b4d6-54cca3235d73-kube-api-access-n9k8c\") pod \"983d33ec-2246-4db3-b4d6-54cca3235d73\" (UID: \"983d33ec-2246-4db3-b4d6-54cca3235d73\") " Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.459561 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/983d33ec-2246-4db3-b4d6-54cca3235d73-config-data\") pod \"983d33ec-2246-4db3-b4d6-54cca3235d73\" (UID: \"983d33ec-2246-4db3-b4d6-54cca3235d73\") " Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.466322 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/983d33ec-2246-4db3-b4d6-54cca3235d73-kube-api-access-n9k8c" (OuterVolumeSpecName: "kube-api-access-n9k8c") pod "983d33ec-2246-4db3-b4d6-54cca3235d73" (UID: "983d33ec-2246-4db3-b4d6-54cca3235d73"). InnerVolumeSpecName "kube-api-access-n9k8c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.498976 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/983d33ec-2246-4db3-b4d6-54cca3235d73-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "983d33ec-2246-4db3-b4d6-54cca3235d73" (UID: "983d33ec-2246-4db3-b4d6-54cca3235d73"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.517494 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/983d33ec-2246-4db3-b4d6-54cca3235d73-config-data" (OuterVolumeSpecName: "config-data") pod "983d33ec-2246-4db3-b4d6-54cca3235d73" (UID: "983d33ec-2246-4db3-b4d6-54cca3235d73"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.562531 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/983d33ec-2246-4db3-b4d6-54cca3235d73-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.562565 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/983d33ec-2246-4db3-b4d6-54cca3235d73-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.562582 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n9k8c\" (UniqueName: \"kubernetes.io/projected/983d33ec-2246-4db3-b4d6-54cca3235d73-kube-api-access-n9k8c\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.616134 4701 generic.go:334] "Generic (PLEG): container finished" podID="983d33ec-2246-4db3-b4d6-54cca3235d73" containerID="bf340e072671d82328b957df1c39479c00b9cd5998634d839e725ca1fdfa0615" exitCode=137 Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.616879 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"983d33ec-2246-4db3-b4d6-54cca3235d73","Type":"ContainerDied","Data":"bf340e072671d82328b957df1c39479c00b9cd5998634d839e725ca1fdfa0615"} Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.617001 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"983d33ec-2246-4db3-b4d6-54cca3235d73","Type":"ContainerDied","Data":"6c2332d37dda340665afd0fc5af4702b020701c6cfca11b67251bbd2c12dfe63"} Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.617049 4701 scope.go:117] "RemoveContainer" containerID="bf340e072671d82328b957df1c39479c00b9cd5998634d839e725ca1fdfa0615" Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.617071 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.665896 4701 scope.go:117] "RemoveContainer" containerID="bf340e072671d82328b957df1c39479c00b9cd5998634d839e725ca1fdfa0615" Nov 21 19:22:27 crc kubenswrapper[4701]: E1121 19:22:27.667248 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bf340e072671d82328b957df1c39479c00b9cd5998634d839e725ca1fdfa0615\": container with ID starting with bf340e072671d82328b957df1c39479c00b9cd5998634d839e725ca1fdfa0615 not found: ID does not exist" containerID="bf340e072671d82328b957df1c39479c00b9cd5998634d839e725ca1fdfa0615" Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.667309 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf340e072671d82328b957df1c39479c00b9cd5998634d839e725ca1fdfa0615"} err="failed to get container status \"bf340e072671d82328b957df1c39479c00b9cd5998634d839e725ca1fdfa0615\": rpc error: code = NotFound desc = could not find container \"bf340e072671d82328b957df1c39479c00b9cd5998634d839e725ca1fdfa0615\": container with ID starting with bf340e072671d82328b957df1c39479c00b9cd5998634d839e725ca1fdfa0615 not found: ID does not exist" Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.679235 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.692685 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.707714 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 21 19:22:27 crc kubenswrapper[4701]: E1121 19:22:27.708369 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="983d33ec-2246-4db3-b4d6-54cca3235d73" containerName="nova-cell1-novncproxy-novncproxy" Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.708388 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="983d33ec-2246-4db3-b4d6-54cca3235d73" containerName="nova-cell1-novncproxy-novncproxy" Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.708701 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="983d33ec-2246-4db3-b4d6-54cca3235d73" containerName="nova-cell1-novncproxy-novncproxy" Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.709685 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.712704 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.713389 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.716120 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.728811 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.870436 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wxw4h\" (UniqueName: \"kubernetes.io/projected/43fdb14d-22e9-469a-b6cb-00477daa5ece-kube-api-access-wxw4h\") pod \"nova-cell1-novncproxy-0\" (UID: \"43fdb14d-22e9-469a-b6cb-00477daa5ece\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.870480 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/43fdb14d-22e9-469a-b6cb-00477daa5ece-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"43fdb14d-22e9-469a-b6cb-00477daa5ece\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.870585 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/43fdb14d-22e9-469a-b6cb-00477daa5ece-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"43fdb14d-22e9-469a-b6cb-00477daa5ece\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.870642 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43fdb14d-22e9-469a-b6cb-00477daa5ece-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"43fdb14d-22e9-469a-b6cb-00477daa5ece\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.870668 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43fdb14d-22e9-469a-b6cb-00477daa5ece-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"43fdb14d-22e9-469a-b6cb-00477daa5ece\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.964234 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="983d33ec-2246-4db3-b4d6-54cca3235d73" path="/var/lib/kubelet/pods/983d33ec-2246-4db3-b4d6-54cca3235d73/volumes" Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.972508 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/43fdb14d-22e9-469a-b6cb-00477daa5ece-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"43fdb14d-22e9-469a-b6cb-00477daa5ece\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.972580 4701 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43fdb14d-22e9-469a-b6cb-00477daa5ece-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"43fdb14d-22e9-469a-b6cb-00477daa5ece\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.972610 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43fdb14d-22e9-469a-b6cb-00477daa5ece-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"43fdb14d-22e9-469a-b6cb-00477daa5ece\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.972866 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wxw4h\" (UniqueName: \"kubernetes.io/projected/43fdb14d-22e9-469a-b6cb-00477daa5ece-kube-api-access-wxw4h\") pod \"nova-cell1-novncproxy-0\" (UID: \"43fdb14d-22e9-469a-b6cb-00477daa5ece\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.972893 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/43fdb14d-22e9-469a-b6cb-00477daa5ece-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"43fdb14d-22e9-469a-b6cb-00477daa5ece\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.978681 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/43fdb14d-22e9-469a-b6cb-00477daa5ece-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"43fdb14d-22e9-469a-b6cb-00477daa5ece\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.979095 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/43fdb14d-22e9-469a-b6cb-00477daa5ece-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"43fdb14d-22e9-469a-b6cb-00477daa5ece\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.979695 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43fdb14d-22e9-469a-b6cb-00477daa5ece-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"43fdb14d-22e9-469a-b6cb-00477daa5ece\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 19:22:27 crc kubenswrapper[4701]: I1121 19:22:27.987829 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43fdb14d-22e9-469a-b6cb-00477daa5ece-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"43fdb14d-22e9-469a-b6cb-00477daa5ece\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 19:22:28 crc kubenswrapper[4701]: I1121 19:22:28.003254 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wxw4h\" (UniqueName: \"kubernetes.io/projected/43fdb14d-22e9-469a-b6cb-00477daa5ece-kube-api-access-wxw4h\") pod \"nova-cell1-novncproxy-0\" (UID: \"43fdb14d-22e9-469a-b6cb-00477daa5ece\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 19:22:28 crc kubenswrapper[4701]: I1121 19:22:28.045990 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 21 19:22:28 crc kubenswrapper[4701]: I1121 19:22:28.625997 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 21 19:22:28 crc kubenswrapper[4701]: W1121 19:22:28.628007 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod43fdb14d_22e9_469a_b6cb_00477daa5ece.slice/crio-25a97fbbb36bbc585aa3455f43b6b96827a4b419baa00939b1308bf417f735c5 WatchSource:0}: Error finding container 25a97fbbb36bbc585aa3455f43b6b96827a4b419baa00939b1308bf417f735c5: Status 404 returned error can't find the container with id 25a97fbbb36bbc585aa3455f43b6b96827a4b419baa00939b1308bf417f735c5 Nov 21 19:22:28 crc kubenswrapper[4701]: I1121 19:22:28.806428 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 21 19:22:28 crc kubenswrapper[4701]: I1121 19:22:28.806526 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 21 19:22:28 crc kubenswrapper[4701]: I1121 19:22:28.807342 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 21 19:22:28 crc kubenswrapper[4701]: I1121 19:22:28.807507 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 21 19:22:28 crc kubenswrapper[4701]: I1121 19:22:28.822442 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 21 19:22:28 crc kubenswrapper[4701]: I1121 19:22:28.839965 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 21 19:22:29 crc kubenswrapper[4701]: I1121 19:22:29.067815 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-77cbdf4f4c-z72nc"] Nov 21 19:22:29 crc kubenswrapper[4701]: I1121 19:22:29.071114 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-77cbdf4f4c-z72nc" Nov 21 19:22:29 crc kubenswrapper[4701]: I1121 19:22:29.096552 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-77cbdf4f4c-z72nc"] Nov 21 19:22:29 crc kubenswrapper[4701]: I1121 19:22:29.220505 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5bc9a4f7-dbee-40f5-abff-15038163a9a4-ovsdbserver-sb\") pod \"dnsmasq-dns-77cbdf4f4c-z72nc\" (UID: \"5bc9a4f7-dbee-40f5-abff-15038163a9a4\") " pod="openstack/dnsmasq-dns-77cbdf4f4c-z72nc" Nov 21 19:22:29 crc kubenswrapper[4701]: I1121 19:22:29.220558 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-phwzq\" (UniqueName: \"kubernetes.io/projected/5bc9a4f7-dbee-40f5-abff-15038163a9a4-kube-api-access-phwzq\") pod \"dnsmasq-dns-77cbdf4f4c-z72nc\" (UID: \"5bc9a4f7-dbee-40f5-abff-15038163a9a4\") " pod="openstack/dnsmasq-dns-77cbdf4f4c-z72nc" Nov 21 19:22:29 crc kubenswrapper[4701]: I1121 19:22:29.220626 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5bc9a4f7-dbee-40f5-abff-15038163a9a4-config\") pod \"dnsmasq-dns-77cbdf4f4c-z72nc\" (UID: \"5bc9a4f7-dbee-40f5-abff-15038163a9a4\") " pod="openstack/dnsmasq-dns-77cbdf4f4c-z72nc" Nov 21 19:22:29 crc kubenswrapper[4701]: I1121 19:22:29.220664 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5bc9a4f7-dbee-40f5-abff-15038163a9a4-dns-swift-storage-0\") pod \"dnsmasq-dns-77cbdf4f4c-z72nc\" (UID: \"5bc9a4f7-dbee-40f5-abff-15038163a9a4\") " pod="openstack/dnsmasq-dns-77cbdf4f4c-z72nc" Nov 21 19:22:29 crc kubenswrapper[4701]: I1121 19:22:29.220692 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5bc9a4f7-dbee-40f5-abff-15038163a9a4-ovsdbserver-nb\") pod \"dnsmasq-dns-77cbdf4f4c-z72nc\" (UID: \"5bc9a4f7-dbee-40f5-abff-15038163a9a4\") " pod="openstack/dnsmasq-dns-77cbdf4f4c-z72nc" Nov 21 19:22:29 crc kubenswrapper[4701]: I1121 19:22:29.220757 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5bc9a4f7-dbee-40f5-abff-15038163a9a4-dns-svc\") pod \"dnsmasq-dns-77cbdf4f4c-z72nc\" (UID: \"5bc9a4f7-dbee-40f5-abff-15038163a9a4\") " pod="openstack/dnsmasq-dns-77cbdf4f4c-z72nc" Nov 21 19:22:29 crc kubenswrapper[4701]: I1121 19:22:29.324718 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5bc9a4f7-dbee-40f5-abff-15038163a9a4-dns-svc\") pod \"dnsmasq-dns-77cbdf4f4c-z72nc\" (UID: \"5bc9a4f7-dbee-40f5-abff-15038163a9a4\") " pod="openstack/dnsmasq-dns-77cbdf4f4c-z72nc" Nov 21 19:22:29 crc kubenswrapper[4701]: I1121 19:22:29.324832 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5bc9a4f7-dbee-40f5-abff-15038163a9a4-ovsdbserver-sb\") pod \"dnsmasq-dns-77cbdf4f4c-z72nc\" (UID: \"5bc9a4f7-dbee-40f5-abff-15038163a9a4\") " pod="openstack/dnsmasq-dns-77cbdf4f4c-z72nc" Nov 21 19:22:29 crc kubenswrapper[4701]: I1121 19:22:29.324858 4701 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-phwzq\" (UniqueName: \"kubernetes.io/projected/5bc9a4f7-dbee-40f5-abff-15038163a9a4-kube-api-access-phwzq\") pod \"dnsmasq-dns-77cbdf4f4c-z72nc\" (UID: \"5bc9a4f7-dbee-40f5-abff-15038163a9a4\") " pod="openstack/dnsmasq-dns-77cbdf4f4c-z72nc" Nov 21 19:22:29 crc kubenswrapper[4701]: I1121 19:22:29.324906 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5bc9a4f7-dbee-40f5-abff-15038163a9a4-config\") pod \"dnsmasq-dns-77cbdf4f4c-z72nc\" (UID: \"5bc9a4f7-dbee-40f5-abff-15038163a9a4\") " pod="openstack/dnsmasq-dns-77cbdf4f4c-z72nc" Nov 21 19:22:29 crc kubenswrapper[4701]: I1121 19:22:29.324941 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5bc9a4f7-dbee-40f5-abff-15038163a9a4-dns-swift-storage-0\") pod \"dnsmasq-dns-77cbdf4f4c-z72nc\" (UID: \"5bc9a4f7-dbee-40f5-abff-15038163a9a4\") " pod="openstack/dnsmasq-dns-77cbdf4f4c-z72nc" Nov 21 19:22:29 crc kubenswrapper[4701]: I1121 19:22:29.324968 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5bc9a4f7-dbee-40f5-abff-15038163a9a4-ovsdbserver-nb\") pod \"dnsmasq-dns-77cbdf4f4c-z72nc\" (UID: \"5bc9a4f7-dbee-40f5-abff-15038163a9a4\") " pod="openstack/dnsmasq-dns-77cbdf4f4c-z72nc" Nov 21 19:22:29 crc kubenswrapper[4701]: I1121 19:22:29.325974 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5bc9a4f7-dbee-40f5-abff-15038163a9a4-ovsdbserver-nb\") pod \"dnsmasq-dns-77cbdf4f4c-z72nc\" (UID: \"5bc9a4f7-dbee-40f5-abff-15038163a9a4\") " pod="openstack/dnsmasq-dns-77cbdf4f4c-z72nc" Nov 21 19:22:29 crc kubenswrapper[4701]: I1121 19:22:29.325975 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5bc9a4f7-dbee-40f5-abff-15038163a9a4-config\") pod \"dnsmasq-dns-77cbdf4f4c-z72nc\" (UID: \"5bc9a4f7-dbee-40f5-abff-15038163a9a4\") " pod="openstack/dnsmasq-dns-77cbdf4f4c-z72nc" Nov 21 19:22:29 crc kubenswrapper[4701]: I1121 19:22:29.326395 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5bc9a4f7-dbee-40f5-abff-15038163a9a4-dns-svc\") pod \"dnsmasq-dns-77cbdf4f4c-z72nc\" (UID: \"5bc9a4f7-dbee-40f5-abff-15038163a9a4\") " pod="openstack/dnsmasq-dns-77cbdf4f4c-z72nc" Nov 21 19:22:29 crc kubenswrapper[4701]: I1121 19:22:29.326546 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5bc9a4f7-dbee-40f5-abff-15038163a9a4-dns-swift-storage-0\") pod \"dnsmasq-dns-77cbdf4f4c-z72nc\" (UID: \"5bc9a4f7-dbee-40f5-abff-15038163a9a4\") " pod="openstack/dnsmasq-dns-77cbdf4f4c-z72nc" Nov 21 19:22:29 crc kubenswrapper[4701]: I1121 19:22:29.326721 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5bc9a4f7-dbee-40f5-abff-15038163a9a4-ovsdbserver-sb\") pod \"dnsmasq-dns-77cbdf4f4c-z72nc\" (UID: \"5bc9a4f7-dbee-40f5-abff-15038163a9a4\") " pod="openstack/dnsmasq-dns-77cbdf4f4c-z72nc" Nov 21 19:22:29 crc kubenswrapper[4701]: I1121 19:22:29.400504 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-phwzq\" (UniqueName: 
\"kubernetes.io/projected/5bc9a4f7-dbee-40f5-abff-15038163a9a4-kube-api-access-phwzq\") pod \"dnsmasq-dns-77cbdf4f4c-z72nc\" (UID: \"5bc9a4f7-dbee-40f5-abff-15038163a9a4\") " pod="openstack/dnsmasq-dns-77cbdf4f4c-z72nc" Nov 21 19:22:29 crc kubenswrapper[4701]: I1121 19:22:29.432381 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77cbdf4f4c-z72nc" Nov 21 19:22:29 crc kubenswrapper[4701]: I1121 19:22:29.654086 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"43fdb14d-22e9-469a-b6cb-00477daa5ece","Type":"ContainerStarted","Data":"a37a4f0c1638c0814e064c54e517b09495d408196ae4f35b42e87baecf4acb6e"} Nov 21 19:22:29 crc kubenswrapper[4701]: I1121 19:22:29.654494 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"43fdb14d-22e9-469a-b6cb-00477daa5ece","Type":"ContainerStarted","Data":"25a97fbbb36bbc585aa3455f43b6b96827a4b419baa00939b1308bf417f735c5"} Nov 21 19:22:29 crc kubenswrapper[4701]: I1121 19:22:29.695607 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.695584202 podStartE2EDuration="2.695584202s" podCreationTimestamp="2025-11-21 19:22:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:22:29.677568764 +0000 UTC m=+1240.462708791" watchObservedRunningTime="2025-11-21 19:22:29.695584202 +0000 UTC m=+1240.480724219" Nov 21 19:22:30 crc kubenswrapper[4701]: I1121 19:22:30.013978 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-77cbdf4f4c-z72nc"] Nov 21 19:22:30 crc kubenswrapper[4701]: I1121 19:22:30.663796 4701 generic.go:334] "Generic (PLEG): container finished" podID="5bc9a4f7-dbee-40f5-abff-15038163a9a4" containerID="88752033be19614c3741bc84cbade0dd93e11a2d41bc18edf4ed8a424babc476" exitCode=0 Nov 21 19:22:30 crc kubenswrapper[4701]: I1121 19:22:30.663968 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77cbdf4f4c-z72nc" event={"ID":"5bc9a4f7-dbee-40f5-abff-15038163a9a4","Type":"ContainerDied","Data":"88752033be19614c3741bc84cbade0dd93e11a2d41bc18edf4ed8a424babc476"} Nov 21 19:22:30 crc kubenswrapper[4701]: I1121 19:22:30.664555 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77cbdf4f4c-z72nc" event={"ID":"5bc9a4f7-dbee-40f5-abff-15038163a9a4","Type":"ContainerStarted","Data":"071ea593ce41110b292286e13c7a21efc6ad8ec3572f089eabf14d711119ca42"} Nov 21 19:22:31 crc kubenswrapper[4701]: I1121 19:22:31.629385 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:22:31 crc kubenswrapper[4701]: I1121 19:22:31.630324 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1861bc37-79d8-49a4-a995-34c76712a44d" containerName="ceilometer-central-agent" containerID="cri-o://d1a7286887da00ec2f7b593a56162391f36f2f38b35d647ed5ebeaf62086396e" gracePeriod=30 Nov 21 19:22:31 crc kubenswrapper[4701]: I1121 19:22:31.631046 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1861bc37-79d8-49a4-a995-34c76712a44d" containerName="sg-core" containerID="cri-o://bd6b5a30789d106b288fc08410b4aabaf989f180119898b847c259f236256633" gracePeriod=30 Nov 21 19:22:31 crc kubenswrapper[4701]: I1121 19:22:31.631244 4701 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1861bc37-79d8-49a4-a995-34c76712a44d" containerName="ceilometer-notification-agent" containerID="cri-o://0bb9e1a5c37e9a2877ace5665e856b8f8daff6ddb6ecaf5c979bffa88f102669" gracePeriod=30 Nov 21 19:22:31 crc kubenswrapper[4701]: I1121 19:22:31.631288 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1861bc37-79d8-49a4-a995-34c76712a44d" containerName="proxy-httpd" containerID="cri-o://415b0ee73cbb8f1f276f03b7fdedca99b38b65db657f82dd60a3d1d5cf72f245" gracePeriod=30 Nov 21 19:22:31 crc kubenswrapper[4701]: I1121 19:22:31.654548 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="1861bc37-79d8-49a4-a995-34c76712a44d" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.209:3000/\": EOF" Nov 21 19:22:31 crc kubenswrapper[4701]: I1121 19:22:31.688171 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77cbdf4f4c-z72nc" event={"ID":"5bc9a4f7-dbee-40f5-abff-15038163a9a4","Type":"ContainerStarted","Data":"e00f3227823f34af71cf8759d1fb4b560719497d2e5b56c9c4793581bd481ca0"} Nov 21 19:22:31 crc kubenswrapper[4701]: I1121 19:22:31.688441 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-77cbdf4f4c-z72nc" Nov 21 19:22:31 crc kubenswrapper[4701]: I1121 19:22:31.729538 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-77cbdf4f4c-z72nc" podStartSLOduration=2.7295177969999997 podStartE2EDuration="2.729517797s" podCreationTimestamp="2025-11-21 19:22:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:22:31.725996452 +0000 UTC m=+1242.511136469" watchObservedRunningTime="2025-11-21 19:22:31.729517797 +0000 UTC m=+1242.514657824" Nov 21 19:22:32 crc kubenswrapper[4701]: I1121 19:22:32.003705 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="1861bc37-79d8-49a4-a995-34c76712a44d" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.209:3000/\": dial tcp 10.217.0.209:3000: connect: connection refused" Nov 21 19:22:32 crc kubenswrapper[4701]: I1121 19:22:32.039828 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 21 19:22:32 crc kubenswrapper[4701]: I1121 19:22:32.040089 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="17a65da5-6507-473c-9ca1-2df317f54d15" containerName="nova-api-log" containerID="cri-o://c278ba511fced5bf6402c6b0cf373fef9001ce2954ed0f78a49358eaaa52b927" gracePeriod=30 Nov 21 19:22:32 crc kubenswrapper[4701]: I1121 19:22:32.041227 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="17a65da5-6507-473c-9ca1-2df317f54d15" containerName="nova-api-api" containerID="cri-o://720a2d8a54b520e53d21213b17a3ee1edd60191cc789e49881b207603e869427" gracePeriod=30 Nov 21 19:22:32 crc kubenswrapper[4701]: I1121 19:22:32.742342 4701 generic.go:334] "Generic (PLEG): container finished" podID="1861bc37-79d8-49a4-a995-34c76712a44d" containerID="415b0ee73cbb8f1f276f03b7fdedca99b38b65db657f82dd60a3d1d5cf72f245" exitCode=0 Nov 21 19:22:32 crc kubenswrapper[4701]: I1121 19:22:32.742775 4701 generic.go:334] "Generic (PLEG): container finished" 
podID="1861bc37-79d8-49a4-a995-34c76712a44d" containerID="bd6b5a30789d106b288fc08410b4aabaf989f180119898b847c259f236256633" exitCode=2 Nov 21 19:22:32 crc kubenswrapper[4701]: I1121 19:22:32.742786 4701 generic.go:334] "Generic (PLEG): container finished" podID="1861bc37-79d8-49a4-a995-34c76712a44d" containerID="d1a7286887da00ec2f7b593a56162391f36f2f38b35d647ed5ebeaf62086396e" exitCode=0 Nov 21 19:22:32 crc kubenswrapper[4701]: I1121 19:22:32.742421 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1861bc37-79d8-49a4-a995-34c76712a44d","Type":"ContainerDied","Data":"415b0ee73cbb8f1f276f03b7fdedca99b38b65db657f82dd60a3d1d5cf72f245"} Nov 21 19:22:32 crc kubenswrapper[4701]: I1121 19:22:32.742923 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1861bc37-79d8-49a4-a995-34c76712a44d","Type":"ContainerDied","Data":"bd6b5a30789d106b288fc08410b4aabaf989f180119898b847c259f236256633"} Nov 21 19:22:32 crc kubenswrapper[4701]: I1121 19:22:32.742940 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1861bc37-79d8-49a4-a995-34c76712a44d","Type":"ContainerDied","Data":"d1a7286887da00ec2f7b593a56162391f36f2f38b35d647ed5ebeaf62086396e"} Nov 21 19:22:32 crc kubenswrapper[4701]: I1121 19:22:32.750033 4701 generic.go:334] "Generic (PLEG): container finished" podID="17a65da5-6507-473c-9ca1-2df317f54d15" containerID="c278ba511fced5bf6402c6b0cf373fef9001ce2954ed0f78a49358eaaa52b927" exitCode=143 Nov 21 19:22:32 crc kubenswrapper[4701]: I1121 19:22:32.750360 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"17a65da5-6507-473c-9ca1-2df317f54d15","Type":"ContainerDied","Data":"c278ba511fced5bf6402c6b0cf373fef9001ce2954ed0f78a49358eaaa52b927"} Nov 21 19:22:33 crc kubenswrapper[4701]: I1121 19:22:33.046543 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.310886 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.454393 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17a65da5-6507-473c-9ca1-2df317f54d15-combined-ca-bundle\") pod \"17a65da5-6507-473c-9ca1-2df317f54d15\" (UID: \"17a65da5-6507-473c-9ca1-2df317f54d15\") " Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.454498 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nvtpl\" (UniqueName: \"kubernetes.io/projected/17a65da5-6507-473c-9ca1-2df317f54d15-kube-api-access-nvtpl\") pod \"17a65da5-6507-473c-9ca1-2df317f54d15\" (UID: \"17a65da5-6507-473c-9ca1-2df317f54d15\") " Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.454522 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17a65da5-6507-473c-9ca1-2df317f54d15-config-data\") pod \"17a65da5-6507-473c-9ca1-2df317f54d15\" (UID: \"17a65da5-6507-473c-9ca1-2df317f54d15\") " Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.454702 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/17a65da5-6507-473c-9ca1-2df317f54d15-logs\") pod \"17a65da5-6507-473c-9ca1-2df317f54d15\" (UID: \"17a65da5-6507-473c-9ca1-2df317f54d15\") " Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.455727 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/17a65da5-6507-473c-9ca1-2df317f54d15-logs" (OuterVolumeSpecName: "logs") pod "17a65da5-6507-473c-9ca1-2df317f54d15" (UID: "17a65da5-6507-473c-9ca1-2df317f54d15"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.464486 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/17a65da5-6507-473c-9ca1-2df317f54d15-kube-api-access-nvtpl" (OuterVolumeSpecName: "kube-api-access-nvtpl") pod "17a65da5-6507-473c-9ca1-2df317f54d15" (UID: "17a65da5-6507-473c-9ca1-2df317f54d15"). InnerVolumeSpecName "kube-api-access-nvtpl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.498720 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17a65da5-6507-473c-9ca1-2df317f54d15-config-data" (OuterVolumeSpecName: "config-data") pod "17a65da5-6507-473c-9ca1-2df317f54d15" (UID: "17a65da5-6507-473c-9ca1-2df317f54d15"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.500935 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17a65da5-6507-473c-9ca1-2df317f54d15-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "17a65da5-6507-473c-9ca1-2df317f54d15" (UID: "17a65da5-6507-473c-9ca1-2df317f54d15"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.557109 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17a65da5-6507-473c-9ca1-2df317f54d15-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.557139 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nvtpl\" (UniqueName: \"kubernetes.io/projected/17a65da5-6507-473c-9ca1-2df317f54d15-kube-api-access-nvtpl\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.557153 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17a65da5-6507-473c-9ca1-2df317f54d15-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.557163 4701 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/17a65da5-6507-473c-9ca1-2df317f54d15-logs\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.783681 4701 generic.go:334] "Generic (PLEG): container finished" podID="17a65da5-6507-473c-9ca1-2df317f54d15" containerID="720a2d8a54b520e53d21213b17a3ee1edd60191cc789e49881b207603e869427" exitCode=0 Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.783740 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"17a65da5-6507-473c-9ca1-2df317f54d15","Type":"ContainerDied","Data":"720a2d8a54b520e53d21213b17a3ee1edd60191cc789e49881b207603e869427"} Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.783788 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.784480 4701 scope.go:117] "RemoveContainer" containerID="720a2d8a54b520e53d21213b17a3ee1edd60191cc789e49881b207603e869427" Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.784457 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"17a65da5-6507-473c-9ca1-2df317f54d15","Type":"ContainerDied","Data":"adca9335f4e22e51d3ca348df97b63c24c6b0b55c0dfb3dd03e5487595561239"} Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.825940 4701 scope.go:117] "RemoveContainer" containerID="c278ba511fced5bf6402c6b0cf373fef9001ce2954ed0f78a49358eaaa52b927" Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.853929 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.880260 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.887835 4701 scope.go:117] "RemoveContainer" containerID="720a2d8a54b520e53d21213b17a3ee1edd60191cc789e49881b207603e869427" Nov 21 19:22:34 crc kubenswrapper[4701]: E1121 19:22:34.890740 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"720a2d8a54b520e53d21213b17a3ee1edd60191cc789e49881b207603e869427\": container with ID starting with 720a2d8a54b520e53d21213b17a3ee1edd60191cc789e49881b207603e869427 not found: ID does not exist" containerID="720a2d8a54b520e53d21213b17a3ee1edd60191cc789e49881b207603e869427" Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.890798 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"720a2d8a54b520e53d21213b17a3ee1edd60191cc789e49881b207603e869427"} err="failed to get container status \"720a2d8a54b520e53d21213b17a3ee1edd60191cc789e49881b207603e869427\": rpc error: code = NotFound desc = could not find container \"720a2d8a54b520e53d21213b17a3ee1edd60191cc789e49881b207603e869427\": container with ID starting with 720a2d8a54b520e53d21213b17a3ee1edd60191cc789e49881b207603e869427 not found: ID does not exist" Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.890833 4701 scope.go:117] "RemoveContainer" containerID="c278ba511fced5bf6402c6b0cf373fef9001ce2954ed0f78a49358eaaa52b927" Nov 21 19:22:34 crc kubenswrapper[4701]: E1121 19:22:34.894635 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c278ba511fced5bf6402c6b0cf373fef9001ce2954ed0f78a49358eaaa52b927\": container with ID starting with c278ba511fced5bf6402c6b0cf373fef9001ce2954ed0f78a49358eaaa52b927 not found: ID does not exist" containerID="c278ba511fced5bf6402c6b0cf373fef9001ce2954ed0f78a49358eaaa52b927" Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.894695 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c278ba511fced5bf6402c6b0cf373fef9001ce2954ed0f78a49358eaaa52b927"} err="failed to get container status \"c278ba511fced5bf6402c6b0cf373fef9001ce2954ed0f78a49358eaaa52b927\": rpc error: code = NotFound desc = could not find container \"c278ba511fced5bf6402c6b0cf373fef9001ce2954ed0f78a49358eaaa52b927\": container with ID starting with c278ba511fced5bf6402c6b0cf373fef9001ce2954ed0f78a49358eaaa52b927 not found: ID does not exist" Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.909602 4701 kubelet.go:2421] 
"SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 21 19:22:34 crc kubenswrapper[4701]: E1121 19:22:34.910317 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17a65da5-6507-473c-9ca1-2df317f54d15" containerName="nova-api-log" Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.910333 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="17a65da5-6507-473c-9ca1-2df317f54d15" containerName="nova-api-log" Nov 21 19:22:34 crc kubenswrapper[4701]: E1121 19:22:34.910365 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17a65da5-6507-473c-9ca1-2df317f54d15" containerName="nova-api-api" Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.910372 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="17a65da5-6507-473c-9ca1-2df317f54d15" containerName="nova-api-api" Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.910605 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="17a65da5-6507-473c-9ca1-2df317f54d15" containerName="nova-api-log" Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.910620 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="17a65da5-6507-473c-9ca1-2df317f54d15" containerName="nova-api-api" Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.912083 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.925269 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.925570 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.925708 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.968990 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9a5a04ce-27e1-4099-831f-aa3494125d7b-logs\") pod \"nova-api-0\" (UID: \"9a5a04ce-27e1-4099-831f-aa3494125d7b\") " pod="openstack/nova-api-0" Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.969117 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xz4tv\" (UniqueName: \"kubernetes.io/projected/9a5a04ce-27e1-4099-831f-aa3494125d7b-kube-api-access-xz4tv\") pod \"nova-api-0\" (UID: \"9a5a04ce-27e1-4099-831f-aa3494125d7b\") " pod="openstack/nova-api-0" Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.969153 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a5a04ce-27e1-4099-831f-aa3494125d7b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"9a5a04ce-27e1-4099-831f-aa3494125d7b\") " pod="openstack/nova-api-0" Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.969178 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9a5a04ce-27e1-4099-831f-aa3494125d7b-config-data\") pod \"nova-api-0\" (UID: \"9a5a04ce-27e1-4099-831f-aa3494125d7b\") " pod="openstack/nova-api-0" Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.969292 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9a5a04ce-27e1-4099-831f-aa3494125d7b-internal-tls-certs\") pod \"nova-api-0\" (UID: \"9a5a04ce-27e1-4099-831f-aa3494125d7b\") " pod="openstack/nova-api-0" Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.969315 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9a5a04ce-27e1-4099-831f-aa3494125d7b-public-tls-certs\") pod \"nova-api-0\" (UID: \"9a5a04ce-27e1-4099-831f-aa3494125d7b\") " pod="openstack/nova-api-0" Nov 21 19:22:34 crc kubenswrapper[4701]: I1121 19:22:34.985285 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 21 19:22:35 crc kubenswrapper[4701]: I1121 19:22:35.074091 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9a5a04ce-27e1-4099-831f-aa3494125d7b-internal-tls-certs\") pod \"nova-api-0\" (UID: \"9a5a04ce-27e1-4099-831f-aa3494125d7b\") " pod="openstack/nova-api-0" Nov 21 19:22:35 crc kubenswrapper[4701]: I1121 19:22:35.074165 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9a5a04ce-27e1-4099-831f-aa3494125d7b-public-tls-certs\") pod \"nova-api-0\" (UID: \"9a5a04ce-27e1-4099-831f-aa3494125d7b\") " pod="openstack/nova-api-0" Nov 21 19:22:35 crc kubenswrapper[4701]: I1121 19:22:35.074266 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9a5a04ce-27e1-4099-831f-aa3494125d7b-logs\") pod \"nova-api-0\" (UID: \"9a5a04ce-27e1-4099-831f-aa3494125d7b\") " pod="openstack/nova-api-0" Nov 21 19:22:35 crc kubenswrapper[4701]: I1121 19:22:35.074388 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xz4tv\" (UniqueName: \"kubernetes.io/projected/9a5a04ce-27e1-4099-831f-aa3494125d7b-kube-api-access-xz4tv\") pod \"nova-api-0\" (UID: \"9a5a04ce-27e1-4099-831f-aa3494125d7b\") " pod="openstack/nova-api-0" Nov 21 19:22:35 crc kubenswrapper[4701]: I1121 19:22:35.074436 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a5a04ce-27e1-4099-831f-aa3494125d7b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"9a5a04ce-27e1-4099-831f-aa3494125d7b\") " pod="openstack/nova-api-0" Nov 21 19:22:35 crc kubenswrapper[4701]: I1121 19:22:35.074466 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9a5a04ce-27e1-4099-831f-aa3494125d7b-config-data\") pod \"nova-api-0\" (UID: \"9a5a04ce-27e1-4099-831f-aa3494125d7b\") " pod="openstack/nova-api-0" Nov 21 19:22:35 crc kubenswrapper[4701]: I1121 19:22:35.079940 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9a5a04ce-27e1-4099-831f-aa3494125d7b-logs\") pod \"nova-api-0\" (UID: \"9a5a04ce-27e1-4099-831f-aa3494125d7b\") " pod="openstack/nova-api-0" Nov 21 19:22:35 crc kubenswrapper[4701]: I1121 19:22:35.116153 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9a5a04ce-27e1-4099-831f-aa3494125d7b-config-data\") pod \"nova-api-0\" (UID: \"9a5a04ce-27e1-4099-831f-aa3494125d7b\") " pod="openstack/nova-api-0" Nov 21 19:22:35 crc kubenswrapper[4701]: I1121 
19:22:35.117439 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a5a04ce-27e1-4099-831f-aa3494125d7b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"9a5a04ce-27e1-4099-831f-aa3494125d7b\") " pod="openstack/nova-api-0" Nov 21 19:22:35 crc kubenswrapper[4701]: I1121 19:22:35.117775 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9a5a04ce-27e1-4099-831f-aa3494125d7b-public-tls-certs\") pod \"nova-api-0\" (UID: \"9a5a04ce-27e1-4099-831f-aa3494125d7b\") " pod="openstack/nova-api-0" Nov 21 19:22:35 crc kubenswrapper[4701]: I1121 19:22:35.125844 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xz4tv\" (UniqueName: \"kubernetes.io/projected/9a5a04ce-27e1-4099-831f-aa3494125d7b-kube-api-access-xz4tv\") pod \"nova-api-0\" (UID: \"9a5a04ce-27e1-4099-831f-aa3494125d7b\") " pod="openstack/nova-api-0" Nov 21 19:22:35 crc kubenswrapper[4701]: I1121 19:22:35.129775 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9a5a04ce-27e1-4099-831f-aa3494125d7b-internal-tls-certs\") pod \"nova-api-0\" (UID: \"9a5a04ce-27e1-4099-831f-aa3494125d7b\") " pod="openstack/nova-api-0" Nov 21 19:22:35 crc kubenswrapper[4701]: I1121 19:22:35.254261 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 21 19:22:35 crc kubenswrapper[4701]: I1121 19:22:35.788752 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 21 19:22:35 crc kubenswrapper[4701]: I1121 19:22:35.803136 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9a5a04ce-27e1-4099-831f-aa3494125d7b","Type":"ContainerStarted","Data":"1c1c3815fdff4ae338997415b6b2802cfeb5ab5e395473b4056e2b6bdd336997"} Nov 21 19:22:35 crc kubenswrapper[4701]: I1121 19:22:35.965490 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="17a65da5-6507-473c-9ca1-2df317f54d15" path="/var/lib/kubelet/pods/17a65da5-6507-473c-9ca1-2df317f54d15/volumes" Nov 21 19:22:36 crc kubenswrapper[4701]: I1121 19:22:36.820755 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9a5a04ce-27e1-4099-831f-aa3494125d7b","Type":"ContainerStarted","Data":"6a9a4eda5c857b53227bfd37554da4e92e383f24e58ef0b8052a51c0e27ab5f6"} Nov 21 19:22:36 crc kubenswrapper[4701]: I1121 19:22:36.821649 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9a5a04ce-27e1-4099-831f-aa3494125d7b","Type":"ContainerStarted","Data":"397fdc3497ee13248f8b9a7ac5250d7df9b7a80eb6b6ffff0e59de7fe4f295cd"} Nov 21 19:22:36 crc kubenswrapper[4701]: I1121 19:22:36.861842 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.8618176760000003 podStartE2EDuration="2.861817676s" podCreationTimestamp="2025-11-21 19:22:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:22:36.858694612 +0000 UTC m=+1247.643834679" watchObservedRunningTime="2025-11-21 19:22:36.861817676 +0000 UTC m=+1247.646957713" Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.320848 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.444712 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1861bc37-79d8-49a4-a995-34c76712a44d-sg-core-conf-yaml\") pod \"1861bc37-79d8-49a4-a995-34c76712a44d\" (UID: \"1861bc37-79d8-49a4-a995-34c76712a44d\") " Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.445381 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cvrz4\" (UniqueName: \"kubernetes.io/projected/1861bc37-79d8-49a4-a995-34c76712a44d-kube-api-access-cvrz4\") pod \"1861bc37-79d8-49a4-a995-34c76712a44d\" (UID: \"1861bc37-79d8-49a4-a995-34c76712a44d\") " Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.445741 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1861bc37-79d8-49a4-a995-34c76712a44d-config-data\") pod \"1861bc37-79d8-49a4-a995-34c76712a44d\" (UID: \"1861bc37-79d8-49a4-a995-34c76712a44d\") " Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.446144 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1861bc37-79d8-49a4-a995-34c76712a44d-scripts\") pod \"1861bc37-79d8-49a4-a995-34c76712a44d\" (UID: \"1861bc37-79d8-49a4-a995-34c76712a44d\") " Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.446412 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1861bc37-79d8-49a4-a995-34c76712a44d-log-httpd\") pod \"1861bc37-79d8-49a4-a995-34c76712a44d\" (UID: \"1861bc37-79d8-49a4-a995-34c76712a44d\") " Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.446669 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1861bc37-79d8-49a4-a995-34c76712a44d-run-httpd\") pod \"1861bc37-79d8-49a4-a995-34c76712a44d\" (UID: \"1861bc37-79d8-49a4-a995-34c76712a44d\") " Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.446941 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1861bc37-79d8-49a4-a995-34c76712a44d-combined-ca-bundle\") pod \"1861bc37-79d8-49a4-a995-34c76712a44d\" (UID: \"1861bc37-79d8-49a4-a995-34c76712a44d\") " Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.447272 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1861bc37-79d8-49a4-a995-34c76712a44d-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "1861bc37-79d8-49a4-a995-34c76712a44d" (UID: "1861bc37-79d8-49a4-a995-34c76712a44d"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.448072 4701 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1861bc37-79d8-49a4-a995-34c76712a44d-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.448092 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1861bc37-79d8-49a4-a995-34c76712a44d-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "1861bc37-79d8-49a4-a995-34c76712a44d" (UID: "1861bc37-79d8-49a4-a995-34c76712a44d"). 
InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.454000 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1861bc37-79d8-49a4-a995-34c76712a44d-scripts" (OuterVolumeSpecName: "scripts") pod "1861bc37-79d8-49a4-a995-34c76712a44d" (UID: "1861bc37-79d8-49a4-a995-34c76712a44d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.458405 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1861bc37-79d8-49a4-a995-34c76712a44d-kube-api-access-cvrz4" (OuterVolumeSpecName: "kube-api-access-cvrz4") pod "1861bc37-79d8-49a4-a995-34c76712a44d" (UID: "1861bc37-79d8-49a4-a995-34c76712a44d"). InnerVolumeSpecName "kube-api-access-cvrz4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.488015 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1861bc37-79d8-49a4-a995-34c76712a44d-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "1861bc37-79d8-49a4-a995-34c76712a44d" (UID: "1861bc37-79d8-49a4-a995-34c76712a44d"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.551472 4701 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1861bc37-79d8-49a4-a995-34c76712a44d-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.551521 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cvrz4\" (UniqueName: \"kubernetes.io/projected/1861bc37-79d8-49a4-a995-34c76712a44d-kube-api-access-cvrz4\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.551538 4701 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1861bc37-79d8-49a4-a995-34c76712a44d-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.551551 4701 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1861bc37-79d8-49a4-a995-34c76712a44d-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.563654 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1861bc37-79d8-49a4-a995-34c76712a44d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1861bc37-79d8-49a4-a995-34c76712a44d" (UID: "1861bc37-79d8-49a4-a995-34c76712a44d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.613392 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1861bc37-79d8-49a4-a995-34c76712a44d-config-data" (OuterVolumeSpecName: "config-data") pod "1861bc37-79d8-49a4-a995-34c76712a44d" (UID: "1861bc37-79d8-49a4-a995-34c76712a44d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.654476 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1861bc37-79d8-49a4-a995-34c76712a44d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.654521 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1861bc37-79d8-49a4-a995-34c76712a44d-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.843069 4701 generic.go:334] "Generic (PLEG): container finished" podID="1861bc37-79d8-49a4-a995-34c76712a44d" containerID="0bb9e1a5c37e9a2877ace5665e856b8f8daff6ddb6ecaf5c979bffa88f102669" exitCode=0 Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.843280 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.843272 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1861bc37-79d8-49a4-a995-34c76712a44d","Type":"ContainerDied","Data":"0bb9e1a5c37e9a2877ace5665e856b8f8daff6ddb6ecaf5c979bffa88f102669"} Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.843400 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1861bc37-79d8-49a4-a995-34c76712a44d","Type":"ContainerDied","Data":"9a2efe9c2bbbdc4cb2a0329ec95309ac3da672bebd5bcf6f44314658e8e0bd32"} Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.843448 4701 scope.go:117] "RemoveContainer" containerID="415b0ee73cbb8f1f276f03b7fdedca99b38b65db657f82dd60a3d1d5cf72f245" Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.894795 4701 scope.go:117] "RemoveContainer" containerID="bd6b5a30789d106b288fc08410b4aabaf989f180119898b847c259f236256633" Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.912339 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.939383 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.943004 4701 scope.go:117] "RemoveContainer" containerID="0bb9e1a5c37e9a2877ace5665e856b8f8daff6ddb6ecaf5c979bffa88f102669" Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.971180 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:22:37 crc kubenswrapper[4701]: E1121 19:22:37.971940 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1861bc37-79d8-49a4-a995-34c76712a44d" containerName="sg-core" Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.971978 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="1861bc37-79d8-49a4-a995-34c76712a44d" containerName="sg-core" Nov 21 19:22:37 crc kubenswrapper[4701]: E1121 19:22:37.972015 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1861bc37-79d8-49a4-a995-34c76712a44d" containerName="ceilometer-notification-agent" Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.972028 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="1861bc37-79d8-49a4-a995-34c76712a44d" containerName="ceilometer-notification-agent" Nov 21 19:22:37 crc kubenswrapper[4701]: E1121 19:22:37.972046 4701 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="1861bc37-79d8-49a4-a995-34c76712a44d" containerName="ceilometer-central-agent" Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.972059 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="1861bc37-79d8-49a4-a995-34c76712a44d" containerName="ceilometer-central-agent" Nov 21 19:22:37 crc kubenswrapper[4701]: E1121 19:22:37.972103 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1861bc37-79d8-49a4-a995-34c76712a44d" containerName="proxy-httpd" Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.972116 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="1861bc37-79d8-49a4-a995-34c76712a44d" containerName="proxy-httpd" Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.972494 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="1861bc37-79d8-49a4-a995-34c76712a44d" containerName="sg-core" Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.972542 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="1861bc37-79d8-49a4-a995-34c76712a44d" containerName="ceilometer-central-agent" Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.972569 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="1861bc37-79d8-49a4-a995-34c76712a44d" containerName="proxy-httpd" Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.972609 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="1861bc37-79d8-49a4-a995-34c76712a44d" containerName="ceilometer-notification-agent" Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.975593 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.975736 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.994706 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 21 19:22:37 crc kubenswrapper[4701]: I1121 19:22:37.995131 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 21 19:22:38 crc kubenswrapper[4701]: I1121 19:22:38.020894 4701 scope.go:117] "RemoveContainer" containerID="d1a7286887da00ec2f7b593a56162391f36f2f38b35d647ed5ebeaf62086396e" Nov 21 19:22:38 crc kubenswrapper[4701]: I1121 19:22:38.042638 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1861bc37-79d8-49a4-a995-34c76712a44d" path="/var/lib/kubelet/pods/1861bc37-79d8-49a4-a995-34c76712a44d/volumes" Nov 21 19:22:38 crc kubenswrapper[4701]: I1121 19:22:38.046961 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 21 19:22:38 crc kubenswrapper[4701]: I1121 19:22:38.064952 4701 scope.go:117] "RemoveContainer" containerID="415b0ee73cbb8f1f276f03b7fdedca99b38b65db657f82dd60a3d1d5cf72f245" Nov 21 19:22:38 crc kubenswrapper[4701]: E1121 19:22:38.065416 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"415b0ee73cbb8f1f276f03b7fdedca99b38b65db657f82dd60a3d1d5cf72f245\": container with ID starting with 415b0ee73cbb8f1f276f03b7fdedca99b38b65db657f82dd60a3d1d5cf72f245 not found: ID does not exist" containerID="415b0ee73cbb8f1f276f03b7fdedca99b38b65db657f82dd60a3d1d5cf72f245" Nov 21 19:22:38 crc kubenswrapper[4701]: I1121 19:22:38.065482 4701 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"415b0ee73cbb8f1f276f03b7fdedca99b38b65db657f82dd60a3d1d5cf72f245"} err="failed to get container status \"415b0ee73cbb8f1f276f03b7fdedca99b38b65db657f82dd60a3d1d5cf72f245\": rpc error: code = NotFound desc = could not find container \"415b0ee73cbb8f1f276f03b7fdedca99b38b65db657f82dd60a3d1d5cf72f245\": container with ID starting with 415b0ee73cbb8f1f276f03b7fdedca99b38b65db657f82dd60a3d1d5cf72f245 not found: ID does not exist" Nov 21 19:22:38 crc kubenswrapper[4701]: I1121 19:22:38.065519 4701 scope.go:117] "RemoveContainer" containerID="bd6b5a30789d106b288fc08410b4aabaf989f180119898b847c259f236256633" Nov 21 19:22:38 crc kubenswrapper[4701]: E1121 19:22:38.065890 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bd6b5a30789d106b288fc08410b4aabaf989f180119898b847c259f236256633\": container with ID starting with bd6b5a30789d106b288fc08410b4aabaf989f180119898b847c259f236256633 not found: ID does not exist" containerID="bd6b5a30789d106b288fc08410b4aabaf989f180119898b847c259f236256633" Nov 21 19:22:38 crc kubenswrapper[4701]: I1121 19:22:38.065938 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd6b5a30789d106b288fc08410b4aabaf989f180119898b847c259f236256633"} err="failed to get container status \"bd6b5a30789d106b288fc08410b4aabaf989f180119898b847c259f236256633\": rpc error: code = NotFound desc = could not find container \"bd6b5a30789d106b288fc08410b4aabaf989f180119898b847c259f236256633\": container with ID starting with bd6b5a30789d106b288fc08410b4aabaf989f180119898b847c259f236256633 not found: ID does not exist" Nov 21 19:22:38 crc kubenswrapper[4701]: I1121 19:22:38.065964 4701 scope.go:117] "RemoveContainer" containerID="0bb9e1a5c37e9a2877ace5665e856b8f8daff6ddb6ecaf5c979bffa88f102669" Nov 21 19:22:38 crc kubenswrapper[4701]: E1121 19:22:38.066477 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0bb9e1a5c37e9a2877ace5665e856b8f8daff6ddb6ecaf5c979bffa88f102669\": container with ID starting with 0bb9e1a5c37e9a2877ace5665e856b8f8daff6ddb6ecaf5c979bffa88f102669 not found: ID does not exist" containerID="0bb9e1a5c37e9a2877ace5665e856b8f8daff6ddb6ecaf5c979bffa88f102669" Nov 21 19:22:38 crc kubenswrapper[4701]: I1121 19:22:38.066511 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0bb9e1a5c37e9a2877ace5665e856b8f8daff6ddb6ecaf5c979bffa88f102669"} err="failed to get container status \"0bb9e1a5c37e9a2877ace5665e856b8f8daff6ddb6ecaf5c979bffa88f102669\": rpc error: code = NotFound desc = could not find container \"0bb9e1a5c37e9a2877ace5665e856b8f8daff6ddb6ecaf5c979bffa88f102669\": container with ID starting with 0bb9e1a5c37e9a2877ace5665e856b8f8daff6ddb6ecaf5c979bffa88f102669 not found: ID does not exist" Nov 21 19:22:38 crc kubenswrapper[4701]: I1121 19:22:38.066537 4701 scope.go:117] "RemoveContainer" containerID="d1a7286887da00ec2f7b593a56162391f36f2f38b35d647ed5ebeaf62086396e" Nov 21 19:22:38 crc kubenswrapper[4701]: E1121 19:22:38.066800 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d1a7286887da00ec2f7b593a56162391f36f2f38b35d647ed5ebeaf62086396e\": container with ID starting with d1a7286887da00ec2f7b593a56162391f36f2f38b35d647ed5ebeaf62086396e not found: ID does not exist" 
containerID="d1a7286887da00ec2f7b593a56162391f36f2f38b35d647ed5ebeaf62086396e" Nov 21 19:22:38 crc kubenswrapper[4701]: I1121 19:22:38.066830 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d1a7286887da00ec2f7b593a56162391f36f2f38b35d647ed5ebeaf62086396e"} err="failed to get container status \"d1a7286887da00ec2f7b593a56162391f36f2f38b35d647ed5ebeaf62086396e\": rpc error: code = NotFound desc = could not find container \"d1a7286887da00ec2f7b593a56162391f36f2f38b35d647ed5ebeaf62086396e\": container with ID starting with d1a7286887da00ec2f7b593a56162391f36f2f38b35d647ed5ebeaf62086396e not found: ID does not exist" Nov 21 19:22:38 crc kubenswrapper[4701]: I1121 19:22:38.075696 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 21 19:22:38 crc kubenswrapper[4701]: I1121 19:22:38.100369 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/89bc2158-a924-4e24-8c88-bc981495b7ab-run-httpd\") pod \"ceilometer-0\" (UID: \"89bc2158-a924-4e24-8c88-bc981495b7ab\") " pod="openstack/ceilometer-0" Nov 21 19:22:38 crc kubenswrapper[4701]: I1121 19:22:38.100453 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5gj6q\" (UniqueName: \"kubernetes.io/projected/89bc2158-a924-4e24-8c88-bc981495b7ab-kube-api-access-5gj6q\") pod \"ceilometer-0\" (UID: \"89bc2158-a924-4e24-8c88-bc981495b7ab\") " pod="openstack/ceilometer-0" Nov 21 19:22:38 crc kubenswrapper[4701]: I1121 19:22:38.100489 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/89bc2158-a924-4e24-8c88-bc981495b7ab-log-httpd\") pod \"ceilometer-0\" (UID: \"89bc2158-a924-4e24-8c88-bc981495b7ab\") " pod="openstack/ceilometer-0" Nov 21 19:22:38 crc kubenswrapper[4701]: I1121 19:22:38.100745 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89bc2158-a924-4e24-8c88-bc981495b7ab-config-data\") pod \"ceilometer-0\" (UID: \"89bc2158-a924-4e24-8c88-bc981495b7ab\") " pod="openstack/ceilometer-0" Nov 21 19:22:38 crc kubenswrapper[4701]: I1121 19:22:38.100989 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89bc2158-a924-4e24-8c88-bc981495b7ab-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"89bc2158-a924-4e24-8c88-bc981495b7ab\") " pod="openstack/ceilometer-0" Nov 21 19:22:38 crc kubenswrapper[4701]: I1121 19:22:38.101100 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89bc2158-a924-4e24-8c88-bc981495b7ab-scripts\") pod \"ceilometer-0\" (UID: \"89bc2158-a924-4e24-8c88-bc981495b7ab\") " pod="openstack/ceilometer-0" Nov 21 19:22:38 crc kubenswrapper[4701]: I1121 19:22:38.101184 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/89bc2158-a924-4e24-8c88-bc981495b7ab-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"89bc2158-a924-4e24-8c88-bc981495b7ab\") " pod="openstack/ceilometer-0" Nov 21 19:22:38 crc kubenswrapper[4701]: I1121 19:22:38.203138 4701 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-5gj6q\" (UniqueName: \"kubernetes.io/projected/89bc2158-a924-4e24-8c88-bc981495b7ab-kube-api-access-5gj6q\") pod \"ceilometer-0\" (UID: \"89bc2158-a924-4e24-8c88-bc981495b7ab\") " pod="openstack/ceilometer-0" Nov 21 19:22:38 crc kubenswrapper[4701]: I1121 19:22:38.203232 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/89bc2158-a924-4e24-8c88-bc981495b7ab-log-httpd\") pod \"ceilometer-0\" (UID: \"89bc2158-a924-4e24-8c88-bc981495b7ab\") " pod="openstack/ceilometer-0" Nov 21 19:22:38 crc kubenswrapper[4701]: I1121 19:22:38.203284 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89bc2158-a924-4e24-8c88-bc981495b7ab-config-data\") pod \"ceilometer-0\" (UID: \"89bc2158-a924-4e24-8c88-bc981495b7ab\") " pod="openstack/ceilometer-0" Nov 21 19:22:38 crc kubenswrapper[4701]: I1121 19:22:38.203349 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89bc2158-a924-4e24-8c88-bc981495b7ab-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"89bc2158-a924-4e24-8c88-bc981495b7ab\") " pod="openstack/ceilometer-0" Nov 21 19:22:38 crc kubenswrapper[4701]: I1121 19:22:38.203380 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89bc2158-a924-4e24-8c88-bc981495b7ab-scripts\") pod \"ceilometer-0\" (UID: \"89bc2158-a924-4e24-8c88-bc981495b7ab\") " pod="openstack/ceilometer-0" Nov 21 19:22:38 crc kubenswrapper[4701]: I1121 19:22:38.203398 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/89bc2158-a924-4e24-8c88-bc981495b7ab-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"89bc2158-a924-4e24-8c88-bc981495b7ab\") " pod="openstack/ceilometer-0" Nov 21 19:22:38 crc kubenswrapper[4701]: I1121 19:22:38.203520 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/89bc2158-a924-4e24-8c88-bc981495b7ab-run-httpd\") pod \"ceilometer-0\" (UID: \"89bc2158-a924-4e24-8c88-bc981495b7ab\") " pod="openstack/ceilometer-0" Nov 21 19:22:38 crc kubenswrapper[4701]: I1121 19:22:38.204048 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/89bc2158-a924-4e24-8c88-bc981495b7ab-log-httpd\") pod \"ceilometer-0\" (UID: \"89bc2158-a924-4e24-8c88-bc981495b7ab\") " pod="openstack/ceilometer-0" Nov 21 19:22:38 crc kubenswrapper[4701]: I1121 19:22:38.205742 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/89bc2158-a924-4e24-8c88-bc981495b7ab-run-httpd\") pod \"ceilometer-0\" (UID: \"89bc2158-a924-4e24-8c88-bc981495b7ab\") " pod="openstack/ceilometer-0" Nov 21 19:22:38 crc kubenswrapper[4701]: I1121 19:22:38.211267 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89bc2158-a924-4e24-8c88-bc981495b7ab-scripts\") pod \"ceilometer-0\" (UID: \"89bc2158-a924-4e24-8c88-bc981495b7ab\") " pod="openstack/ceilometer-0" Nov 21 19:22:38 crc kubenswrapper[4701]: I1121 19:22:38.222837 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" 
(UniqueName: \"kubernetes.io/secret/89bc2158-a924-4e24-8c88-bc981495b7ab-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"89bc2158-a924-4e24-8c88-bc981495b7ab\") " pod="openstack/ceilometer-0" Nov 21 19:22:38 crc kubenswrapper[4701]: I1121 19:22:38.223304 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89bc2158-a924-4e24-8c88-bc981495b7ab-config-data\") pod \"ceilometer-0\" (UID: \"89bc2158-a924-4e24-8c88-bc981495b7ab\") " pod="openstack/ceilometer-0" Nov 21 19:22:38 crc kubenswrapper[4701]: I1121 19:22:38.225633 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89bc2158-a924-4e24-8c88-bc981495b7ab-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"89bc2158-a924-4e24-8c88-bc981495b7ab\") " pod="openstack/ceilometer-0" Nov 21 19:22:38 crc kubenswrapper[4701]: I1121 19:22:38.226714 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5gj6q\" (UniqueName: \"kubernetes.io/projected/89bc2158-a924-4e24-8c88-bc981495b7ab-kube-api-access-5gj6q\") pod \"ceilometer-0\" (UID: \"89bc2158-a924-4e24-8c88-bc981495b7ab\") " pod="openstack/ceilometer-0" Nov 21 19:22:38 crc kubenswrapper[4701]: I1121 19:22:38.329312 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 19:22:38 crc kubenswrapper[4701]: I1121 19:22:38.662449 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:22:38 crc kubenswrapper[4701]: I1121 19:22:38.859258 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"89bc2158-a924-4e24-8c88-bc981495b7ab","Type":"ContainerStarted","Data":"09918835caba4284fb3b5d69b238b462d9a71b1697719df9479229a9a867519b"} Nov 21 19:22:38 crc kubenswrapper[4701]: I1121 19:22:38.887461 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 21 19:22:39 crc kubenswrapper[4701]: I1121 19:22:39.192855 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-x5x89"] Nov 21 19:22:39 crc kubenswrapper[4701]: I1121 19:22:39.195167 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-x5x89" Nov 21 19:22:39 crc kubenswrapper[4701]: I1121 19:22:39.201689 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 21 19:22:39 crc kubenswrapper[4701]: I1121 19:22:39.201987 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 21 19:22:39 crc kubenswrapper[4701]: I1121 19:22:39.211543 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-x5x89"] Nov 21 19:22:39 crc kubenswrapper[4701]: I1121 19:22:39.335966 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8w5sj\" (UniqueName: \"kubernetes.io/projected/f37895e6-74f4-4d54-a0b8-232c5ba5c018-kube-api-access-8w5sj\") pod \"nova-cell1-cell-mapping-x5x89\" (UID: \"f37895e6-74f4-4d54-a0b8-232c5ba5c018\") " pod="openstack/nova-cell1-cell-mapping-x5x89" Nov 21 19:22:39 crc kubenswrapper[4701]: I1121 19:22:39.336398 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f37895e6-74f4-4d54-a0b8-232c5ba5c018-config-data\") pod \"nova-cell1-cell-mapping-x5x89\" (UID: \"f37895e6-74f4-4d54-a0b8-232c5ba5c018\") " pod="openstack/nova-cell1-cell-mapping-x5x89" Nov 21 19:22:39 crc kubenswrapper[4701]: I1121 19:22:39.336428 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f37895e6-74f4-4d54-a0b8-232c5ba5c018-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-x5x89\" (UID: \"f37895e6-74f4-4d54-a0b8-232c5ba5c018\") " pod="openstack/nova-cell1-cell-mapping-x5x89" Nov 21 19:22:39 crc kubenswrapper[4701]: I1121 19:22:39.336466 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f37895e6-74f4-4d54-a0b8-232c5ba5c018-scripts\") pod \"nova-cell1-cell-mapping-x5x89\" (UID: \"f37895e6-74f4-4d54-a0b8-232c5ba5c018\") " pod="openstack/nova-cell1-cell-mapping-x5x89" Nov 21 19:22:39 crc kubenswrapper[4701]: I1121 19:22:39.435183 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-77cbdf4f4c-z72nc" Nov 21 19:22:39 crc kubenswrapper[4701]: I1121 19:22:39.437958 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8w5sj\" (UniqueName: \"kubernetes.io/projected/f37895e6-74f4-4d54-a0b8-232c5ba5c018-kube-api-access-8w5sj\") pod \"nova-cell1-cell-mapping-x5x89\" (UID: \"f37895e6-74f4-4d54-a0b8-232c5ba5c018\") " pod="openstack/nova-cell1-cell-mapping-x5x89" Nov 21 19:22:39 crc kubenswrapper[4701]: I1121 19:22:39.438054 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f37895e6-74f4-4d54-a0b8-232c5ba5c018-config-data\") pod \"nova-cell1-cell-mapping-x5x89\" (UID: \"f37895e6-74f4-4d54-a0b8-232c5ba5c018\") " pod="openstack/nova-cell1-cell-mapping-x5x89" Nov 21 19:22:39 crc kubenswrapper[4701]: I1121 19:22:39.438097 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f37895e6-74f4-4d54-a0b8-232c5ba5c018-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-x5x89\" (UID: \"f37895e6-74f4-4d54-a0b8-232c5ba5c018\") " 
pod="openstack/nova-cell1-cell-mapping-x5x89" Nov 21 19:22:39 crc kubenswrapper[4701]: I1121 19:22:39.438158 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f37895e6-74f4-4d54-a0b8-232c5ba5c018-scripts\") pod \"nova-cell1-cell-mapping-x5x89\" (UID: \"f37895e6-74f4-4d54-a0b8-232c5ba5c018\") " pod="openstack/nova-cell1-cell-mapping-x5x89" Nov 21 19:22:39 crc kubenswrapper[4701]: I1121 19:22:39.445905 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f37895e6-74f4-4d54-a0b8-232c5ba5c018-config-data\") pod \"nova-cell1-cell-mapping-x5x89\" (UID: \"f37895e6-74f4-4d54-a0b8-232c5ba5c018\") " pod="openstack/nova-cell1-cell-mapping-x5x89" Nov 21 19:22:39 crc kubenswrapper[4701]: I1121 19:22:39.446872 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f37895e6-74f4-4d54-a0b8-232c5ba5c018-scripts\") pod \"nova-cell1-cell-mapping-x5x89\" (UID: \"f37895e6-74f4-4d54-a0b8-232c5ba5c018\") " pod="openstack/nova-cell1-cell-mapping-x5x89" Nov 21 19:22:39 crc kubenswrapper[4701]: I1121 19:22:39.458972 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f37895e6-74f4-4d54-a0b8-232c5ba5c018-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-x5x89\" (UID: \"f37895e6-74f4-4d54-a0b8-232c5ba5c018\") " pod="openstack/nova-cell1-cell-mapping-x5x89" Nov 21 19:22:39 crc kubenswrapper[4701]: I1121 19:22:39.464127 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8w5sj\" (UniqueName: \"kubernetes.io/projected/f37895e6-74f4-4d54-a0b8-232c5ba5c018-kube-api-access-8w5sj\") pod \"nova-cell1-cell-mapping-x5x89\" (UID: \"f37895e6-74f4-4d54-a0b8-232c5ba5c018\") " pod="openstack/nova-cell1-cell-mapping-x5x89" Nov 21 19:22:39 crc kubenswrapper[4701]: I1121 19:22:39.510299 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-66995857cf-dk6r8"] Nov 21 19:22:39 crc kubenswrapper[4701]: I1121 19:22:39.510654 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-66995857cf-dk6r8" podUID="e9e1aa13-3797-4779-b94f-f39f2e4fcabd" containerName="dnsmasq-dns" containerID="cri-o://9ededf9eb8775b1266dfe37291abc07e8343805f8fd852f9de840631df2dfd88" gracePeriod=10 Nov 21 19:22:39 crc kubenswrapper[4701]: I1121 19:22:39.596799 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-x5x89" Nov 21 19:22:39 crc kubenswrapper[4701]: I1121 19:22:39.872739 4701 generic.go:334] "Generic (PLEG): container finished" podID="e9e1aa13-3797-4779-b94f-f39f2e4fcabd" containerID="9ededf9eb8775b1266dfe37291abc07e8343805f8fd852f9de840631df2dfd88" exitCode=0 Nov 21 19:22:39 crc kubenswrapper[4701]: I1121 19:22:39.873177 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66995857cf-dk6r8" event={"ID":"e9e1aa13-3797-4779-b94f-f39f2e4fcabd","Type":"ContainerDied","Data":"9ededf9eb8775b1266dfe37291abc07e8343805f8fd852f9de840631df2dfd88"} Nov 21 19:22:39 crc kubenswrapper[4701]: I1121 19:22:39.890292 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"89bc2158-a924-4e24-8c88-bc981495b7ab","Type":"ContainerStarted","Data":"3b6c88637788a44438b2cd61aa5ed3b5099c0b16574186d2d1937bb0a9fd5cb5"} Nov 21 19:22:39 crc kubenswrapper[4701]: I1121 19:22:39.890501 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"89bc2158-a924-4e24-8c88-bc981495b7ab","Type":"ContainerStarted","Data":"c9bab56b23fb1f711d3f4b1613ab69df67e5a52c1105f3c8fdf47fdff783d4ca"} Nov 21 19:22:40 crc kubenswrapper[4701]: I1121 19:22:40.249643 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-x5x89"] Nov 21 19:22:40 crc kubenswrapper[4701]: I1121 19:22:40.457873 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-66995857cf-dk6r8" Nov 21 19:22:40 crc kubenswrapper[4701]: I1121 19:22:40.564058 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e9e1aa13-3797-4779-b94f-f39f2e4fcabd-dns-svc\") pod \"e9e1aa13-3797-4779-b94f-f39f2e4fcabd\" (UID: \"e9e1aa13-3797-4779-b94f-f39f2e4fcabd\") " Nov 21 19:22:40 crc kubenswrapper[4701]: I1121 19:22:40.564527 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e9e1aa13-3797-4779-b94f-f39f2e4fcabd-ovsdbserver-sb\") pod \"e9e1aa13-3797-4779-b94f-f39f2e4fcabd\" (UID: \"e9e1aa13-3797-4779-b94f-f39f2e4fcabd\") " Nov 21 19:22:40 crc kubenswrapper[4701]: I1121 19:22:40.564720 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9e1aa13-3797-4779-b94f-f39f2e4fcabd-config\") pod \"e9e1aa13-3797-4779-b94f-f39f2e4fcabd\" (UID: \"e9e1aa13-3797-4779-b94f-f39f2e4fcabd\") " Nov 21 19:22:40 crc kubenswrapper[4701]: I1121 19:22:40.564893 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e9e1aa13-3797-4779-b94f-f39f2e4fcabd-dns-swift-storage-0\") pod \"e9e1aa13-3797-4779-b94f-f39f2e4fcabd\" (UID: \"e9e1aa13-3797-4779-b94f-f39f2e4fcabd\") " Nov 21 19:22:40 crc kubenswrapper[4701]: I1121 19:22:40.565089 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2s7zg\" (UniqueName: \"kubernetes.io/projected/e9e1aa13-3797-4779-b94f-f39f2e4fcabd-kube-api-access-2s7zg\") pod \"e9e1aa13-3797-4779-b94f-f39f2e4fcabd\" (UID: \"e9e1aa13-3797-4779-b94f-f39f2e4fcabd\") " Nov 21 19:22:40 crc kubenswrapper[4701]: I1121 19:22:40.565301 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/e9e1aa13-3797-4779-b94f-f39f2e4fcabd-ovsdbserver-nb\") pod \"e9e1aa13-3797-4779-b94f-f39f2e4fcabd\" (UID: \"e9e1aa13-3797-4779-b94f-f39f2e4fcabd\") " Nov 21 19:22:40 crc kubenswrapper[4701]: I1121 19:22:40.569575 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e9e1aa13-3797-4779-b94f-f39f2e4fcabd-kube-api-access-2s7zg" (OuterVolumeSpecName: "kube-api-access-2s7zg") pod "e9e1aa13-3797-4779-b94f-f39f2e4fcabd" (UID: "e9e1aa13-3797-4779-b94f-f39f2e4fcabd"). InnerVolumeSpecName "kube-api-access-2s7zg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:22:40 crc kubenswrapper[4701]: I1121 19:22:40.627277 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e9e1aa13-3797-4779-b94f-f39f2e4fcabd-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "e9e1aa13-3797-4779-b94f-f39f2e4fcabd" (UID: "e9e1aa13-3797-4779-b94f-f39f2e4fcabd"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:22:40 crc kubenswrapper[4701]: I1121 19:22:40.638542 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e9e1aa13-3797-4779-b94f-f39f2e4fcabd-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e9e1aa13-3797-4779-b94f-f39f2e4fcabd" (UID: "e9e1aa13-3797-4779-b94f-f39f2e4fcabd"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:22:40 crc kubenswrapper[4701]: I1121 19:22:40.641952 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e9e1aa13-3797-4779-b94f-f39f2e4fcabd-config" (OuterVolumeSpecName: "config") pod "e9e1aa13-3797-4779-b94f-f39f2e4fcabd" (UID: "e9e1aa13-3797-4779-b94f-f39f2e4fcabd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:22:40 crc kubenswrapper[4701]: I1121 19:22:40.649008 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e9e1aa13-3797-4779-b94f-f39f2e4fcabd-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e9e1aa13-3797-4779-b94f-f39f2e4fcabd" (UID: "e9e1aa13-3797-4779-b94f-f39f2e4fcabd"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:22:40 crc kubenswrapper[4701]: I1121 19:22:40.649234 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e9e1aa13-3797-4779-b94f-f39f2e4fcabd-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e9e1aa13-3797-4779-b94f-f39f2e4fcabd" (UID: "e9e1aa13-3797-4779-b94f-f39f2e4fcabd"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:22:40 crc kubenswrapper[4701]: I1121 19:22:40.668663 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2s7zg\" (UniqueName: \"kubernetes.io/projected/e9e1aa13-3797-4779-b94f-f39f2e4fcabd-kube-api-access-2s7zg\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:40 crc kubenswrapper[4701]: I1121 19:22:40.668713 4701 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e9e1aa13-3797-4779-b94f-f39f2e4fcabd-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:40 crc kubenswrapper[4701]: I1121 19:22:40.668728 4701 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e9e1aa13-3797-4779-b94f-f39f2e4fcabd-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:40 crc kubenswrapper[4701]: I1121 19:22:40.668747 4701 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e9e1aa13-3797-4779-b94f-f39f2e4fcabd-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:40 crc kubenswrapper[4701]: I1121 19:22:40.668757 4701 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9e1aa13-3797-4779-b94f-f39f2e4fcabd-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:40 crc kubenswrapper[4701]: I1121 19:22:40.668770 4701 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e9e1aa13-3797-4779-b94f-f39f2e4fcabd-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:40 crc kubenswrapper[4701]: I1121 19:22:40.907242 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66995857cf-dk6r8" event={"ID":"e9e1aa13-3797-4779-b94f-f39f2e4fcabd","Type":"ContainerDied","Data":"bea87ad9b77312423857e41341f7e42d7b7fcf6f7326feef9db9f128c53e4bb5"} Nov 21 19:22:40 crc kubenswrapper[4701]: I1121 19:22:40.907302 4701 scope.go:117] "RemoveContainer" containerID="9ededf9eb8775b1266dfe37291abc07e8343805f8fd852f9de840631df2dfd88" Nov 21 19:22:40 crc kubenswrapper[4701]: I1121 19:22:40.908574 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-66995857cf-dk6r8" Nov 21 19:22:40 crc kubenswrapper[4701]: I1121 19:22:40.918716 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-x5x89" event={"ID":"f37895e6-74f4-4d54-a0b8-232c5ba5c018","Type":"ContainerStarted","Data":"ca6070a3011b2d28c3839bc8413f71f64dfb43f29762867e4ce4f2321913c59d"} Nov 21 19:22:40 crc kubenswrapper[4701]: I1121 19:22:40.918776 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-x5x89" event={"ID":"f37895e6-74f4-4d54-a0b8-232c5ba5c018","Type":"ContainerStarted","Data":"e5c2bc7103f6d2ca70726e65dea6b06b2ac30963c808973e7983a0fccb0de493"} Nov 21 19:22:40 crc kubenswrapper[4701]: I1121 19:22:40.928622 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"89bc2158-a924-4e24-8c88-bc981495b7ab","Type":"ContainerStarted","Data":"8697d0f11f914089c6efb2568a7b77f2657978f861a6352963c2aa740c30bae6"} Nov 21 19:22:40 crc kubenswrapper[4701]: I1121 19:22:40.958510 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-x5x89" podStartSLOduration=1.958489537 podStartE2EDuration="1.958489537s" podCreationTimestamp="2025-11-21 19:22:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:22:40.947442458 +0000 UTC m=+1251.732582485" watchObservedRunningTime="2025-11-21 19:22:40.958489537 +0000 UTC m=+1251.743629564" Nov 21 19:22:41 crc kubenswrapper[4701]: I1121 19:22:41.005956 4701 scope.go:117] "RemoveContainer" containerID="96bc60dfeec492a374d47d3ec95cdf5a8880ba575c2f238544353e0117262340" Nov 21 19:22:41 crc kubenswrapper[4701]: I1121 19:22:41.011618 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-66995857cf-dk6r8"] Nov 21 19:22:41 crc kubenswrapper[4701]: I1121 19:22:41.024971 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-66995857cf-dk6r8"] Nov 21 19:22:41 crc kubenswrapper[4701]: I1121 19:22:41.971018 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e9e1aa13-3797-4779-b94f-f39f2e4fcabd" path="/var/lib/kubelet/pods/e9e1aa13-3797-4779-b94f-f39f2e4fcabd/volumes" Nov 21 19:22:41 crc kubenswrapper[4701]: I1121 19:22:41.974662 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 21 19:22:41 crc kubenswrapper[4701]: I1121 19:22:41.974719 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"89bc2158-a924-4e24-8c88-bc981495b7ab","Type":"ContainerStarted","Data":"1d23d1e0a85c550c1fd905d46e8f8811f9eff8efef226ed9a1f01cacce70ccd9"} Nov 21 19:22:42 crc kubenswrapper[4701]: I1121 19:22:42.022153 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.37021614 podStartE2EDuration="5.022117005s" podCreationTimestamp="2025-11-21 19:22:37 +0000 UTC" firstStartedPulling="2025-11-21 19:22:38.67299694 +0000 UTC m=+1249.458136967" lastFinishedPulling="2025-11-21 19:22:41.324897775 +0000 UTC m=+1252.110037832" observedRunningTime="2025-11-21 19:22:42.001390033 +0000 UTC m=+1252.786530100" watchObservedRunningTime="2025-11-21 19:22:42.022117005 +0000 UTC m=+1252.807257042" Nov 21 19:22:45 crc kubenswrapper[4701]: I1121 19:22:45.255526 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openstack/nova-api-0" Nov 21 19:22:45 crc kubenswrapper[4701]: I1121 19:22:45.256297 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 21 19:22:46 crc kubenswrapper[4701]: I1121 19:22:46.274502 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="9a5a04ce-27e1-4099-831f-aa3494125d7b" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.216:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 21 19:22:46 crc kubenswrapper[4701]: I1121 19:22:46.274494 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="9a5a04ce-27e1-4099-831f-aa3494125d7b" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.216:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 21 19:22:47 crc kubenswrapper[4701]: I1121 19:22:47.050968 4701 generic.go:334] "Generic (PLEG): container finished" podID="f37895e6-74f4-4d54-a0b8-232c5ba5c018" containerID="ca6070a3011b2d28c3839bc8413f71f64dfb43f29762867e4ce4f2321913c59d" exitCode=0 Nov 21 19:22:47 crc kubenswrapper[4701]: I1121 19:22:47.051147 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-x5x89" event={"ID":"f37895e6-74f4-4d54-a0b8-232c5ba5c018","Type":"ContainerDied","Data":"ca6070a3011b2d28c3839bc8413f71f64dfb43f29762867e4ce4f2321913c59d"} Nov 21 19:22:48 crc kubenswrapper[4701]: I1121 19:22:48.539894 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-x5x89" Nov 21 19:22:48 crc kubenswrapper[4701]: I1121 19:22:48.586611 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f37895e6-74f4-4d54-a0b8-232c5ba5c018-config-data\") pod \"f37895e6-74f4-4d54-a0b8-232c5ba5c018\" (UID: \"f37895e6-74f4-4d54-a0b8-232c5ba5c018\") " Nov 21 19:22:48 crc kubenswrapper[4701]: I1121 19:22:48.586720 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f37895e6-74f4-4d54-a0b8-232c5ba5c018-scripts\") pod \"f37895e6-74f4-4d54-a0b8-232c5ba5c018\" (UID: \"f37895e6-74f4-4d54-a0b8-232c5ba5c018\") " Nov 21 19:22:48 crc kubenswrapper[4701]: I1121 19:22:48.587001 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8w5sj\" (UniqueName: \"kubernetes.io/projected/f37895e6-74f4-4d54-a0b8-232c5ba5c018-kube-api-access-8w5sj\") pod \"f37895e6-74f4-4d54-a0b8-232c5ba5c018\" (UID: \"f37895e6-74f4-4d54-a0b8-232c5ba5c018\") " Nov 21 19:22:48 crc kubenswrapper[4701]: I1121 19:22:48.587041 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f37895e6-74f4-4d54-a0b8-232c5ba5c018-combined-ca-bundle\") pod \"f37895e6-74f4-4d54-a0b8-232c5ba5c018\" (UID: \"f37895e6-74f4-4d54-a0b8-232c5ba5c018\") " Nov 21 19:22:48 crc kubenswrapper[4701]: I1121 19:22:48.595231 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f37895e6-74f4-4d54-a0b8-232c5ba5c018-kube-api-access-8w5sj" (OuterVolumeSpecName: "kube-api-access-8w5sj") pod "f37895e6-74f4-4d54-a0b8-232c5ba5c018" (UID: "f37895e6-74f4-4d54-a0b8-232c5ba5c018"). InnerVolumeSpecName "kube-api-access-8w5sj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:22:48 crc kubenswrapper[4701]: I1121 19:22:48.596470 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f37895e6-74f4-4d54-a0b8-232c5ba5c018-scripts" (OuterVolumeSpecName: "scripts") pod "f37895e6-74f4-4d54-a0b8-232c5ba5c018" (UID: "f37895e6-74f4-4d54-a0b8-232c5ba5c018"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:48 crc kubenswrapper[4701]: I1121 19:22:48.620840 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f37895e6-74f4-4d54-a0b8-232c5ba5c018-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f37895e6-74f4-4d54-a0b8-232c5ba5c018" (UID: "f37895e6-74f4-4d54-a0b8-232c5ba5c018"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:48 crc kubenswrapper[4701]: I1121 19:22:48.632301 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f37895e6-74f4-4d54-a0b8-232c5ba5c018-config-data" (OuterVolumeSpecName: "config-data") pod "f37895e6-74f4-4d54-a0b8-232c5ba5c018" (UID: "f37895e6-74f4-4d54-a0b8-232c5ba5c018"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:48 crc kubenswrapper[4701]: I1121 19:22:48.693724 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8w5sj\" (UniqueName: \"kubernetes.io/projected/f37895e6-74f4-4d54-a0b8-232c5ba5c018-kube-api-access-8w5sj\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:48 crc kubenswrapper[4701]: I1121 19:22:48.693792 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f37895e6-74f4-4d54-a0b8-232c5ba5c018-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:48 crc kubenswrapper[4701]: I1121 19:22:48.693811 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f37895e6-74f4-4d54-a0b8-232c5ba5c018-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:48 crc kubenswrapper[4701]: I1121 19:22:48.693829 4701 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f37895e6-74f4-4d54-a0b8-232c5ba5c018-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:49 crc kubenswrapper[4701]: I1121 19:22:49.090184 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-x5x89" event={"ID":"f37895e6-74f4-4d54-a0b8-232c5ba5c018","Type":"ContainerDied","Data":"e5c2bc7103f6d2ca70726e65dea6b06b2ac30963c808973e7983a0fccb0de493"} Nov 21 19:22:49 crc kubenswrapper[4701]: I1121 19:22:49.090892 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e5c2bc7103f6d2ca70726e65dea6b06b2ac30963c808973e7983a0fccb0de493" Nov 21 19:22:49 crc kubenswrapper[4701]: I1121 19:22:49.090610 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-x5x89" Nov 21 19:22:49 crc kubenswrapper[4701]: I1121 19:22:49.310552 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 21 19:22:49 crc kubenswrapper[4701]: I1121 19:22:49.310990 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="9a5a04ce-27e1-4099-831f-aa3494125d7b" containerName="nova-api-log" containerID="cri-o://397fdc3497ee13248f8b9a7ac5250d7df9b7a80eb6b6ffff0e59de7fe4f295cd" gracePeriod=30 Nov 21 19:22:49 crc kubenswrapper[4701]: I1121 19:22:49.311129 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="9a5a04ce-27e1-4099-831f-aa3494125d7b" containerName="nova-api-api" containerID="cri-o://6a9a4eda5c857b53227bfd37554da4e92e383f24e58ef0b8052a51c0e27ab5f6" gracePeriod=30 Nov 21 19:22:49 crc kubenswrapper[4701]: I1121 19:22:49.337406 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 19:22:49 crc kubenswrapper[4701]: I1121 19:22:49.337760 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="dfec6afb-55e4-4efa-8e87-4f68937b2672" containerName="nova-scheduler-scheduler" containerID="cri-o://217c7063a1e554974ad75c90ab018bb48a6c866b14bde9fd66e948fa92c36238" gracePeriod=30 Nov 21 19:22:49 crc kubenswrapper[4701]: I1121 19:22:49.455873 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 19:22:49 crc kubenswrapper[4701]: I1121 19:22:49.456143 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="66b0eb91-a81c-4a68-8e9d-a1042d709e80" containerName="nova-metadata-log" containerID="cri-o://d7be4db370d7b69d26fc12236bf4a82631413dfee073cf0cd71ebc33eb4d3b6a" gracePeriod=30 Nov 21 19:22:49 crc kubenswrapper[4701]: I1121 19:22:49.456331 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="66b0eb91-a81c-4a68-8e9d-a1042d709e80" containerName="nova-metadata-metadata" containerID="cri-o://4336a6cf3798ec22e54d0ce87ce0f7a7e04b646adceb0048a6ffd573f71f4f4a" gracePeriod=30 Nov 21 19:22:49 crc kubenswrapper[4701]: E1121 19:22:49.840524 4701 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="217c7063a1e554974ad75c90ab018bb48a6c866b14bde9fd66e948fa92c36238" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 21 19:22:49 crc kubenswrapper[4701]: E1121 19:22:49.842239 4701 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="217c7063a1e554974ad75c90ab018bb48a6c866b14bde9fd66e948fa92c36238" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 21 19:22:49 crc kubenswrapper[4701]: E1121 19:22:49.847643 4701 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="217c7063a1e554974ad75c90ab018bb48a6c866b14bde9fd66e948fa92c36238" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 21 19:22:49 crc kubenswrapper[4701]: E1121 19:22:49.847701 4701 prober.go:104] "Probe errored" 
err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="dfec6afb-55e4-4efa-8e87-4f68937b2672" containerName="nova-scheduler-scheduler" Nov 21 19:22:50 crc kubenswrapper[4701]: I1121 19:22:50.113759 4701 generic.go:334] "Generic (PLEG): container finished" podID="66b0eb91-a81c-4a68-8e9d-a1042d709e80" containerID="d7be4db370d7b69d26fc12236bf4a82631413dfee073cf0cd71ebc33eb4d3b6a" exitCode=143 Nov 21 19:22:50 crc kubenswrapper[4701]: I1121 19:22:50.113855 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"66b0eb91-a81c-4a68-8e9d-a1042d709e80","Type":"ContainerDied","Data":"d7be4db370d7b69d26fc12236bf4a82631413dfee073cf0cd71ebc33eb4d3b6a"} Nov 21 19:22:50 crc kubenswrapper[4701]: I1121 19:22:50.122739 4701 generic.go:334] "Generic (PLEG): container finished" podID="9a5a04ce-27e1-4099-831f-aa3494125d7b" containerID="397fdc3497ee13248f8b9a7ac5250d7df9b7a80eb6b6ffff0e59de7fe4f295cd" exitCode=143 Nov 21 19:22:50 crc kubenswrapper[4701]: I1121 19:22:50.122842 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9a5a04ce-27e1-4099-831f-aa3494125d7b","Type":"ContainerDied","Data":"397fdc3497ee13248f8b9a7ac5250d7df9b7a80eb6b6ffff0e59de7fe4f295cd"} Nov 21 19:22:50 crc kubenswrapper[4701]: I1121 19:22:50.851725 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 21 19:22:50 crc kubenswrapper[4701]: I1121 19:22:50.950184 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66b0eb91-a81c-4a68-8e9d-a1042d709e80-config-data\") pod \"66b0eb91-a81c-4a68-8e9d-a1042d709e80\" (UID: \"66b0eb91-a81c-4a68-8e9d-a1042d709e80\") " Nov 21 19:22:50 crc kubenswrapper[4701]: I1121 19:22:50.950319 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p7q5m\" (UniqueName: \"kubernetes.io/projected/66b0eb91-a81c-4a68-8e9d-a1042d709e80-kube-api-access-p7q5m\") pod \"66b0eb91-a81c-4a68-8e9d-a1042d709e80\" (UID: \"66b0eb91-a81c-4a68-8e9d-a1042d709e80\") " Nov 21 19:22:50 crc kubenswrapper[4701]: I1121 19:22:50.950453 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/66b0eb91-a81c-4a68-8e9d-a1042d709e80-logs\") pod \"66b0eb91-a81c-4a68-8e9d-a1042d709e80\" (UID: \"66b0eb91-a81c-4a68-8e9d-a1042d709e80\") " Nov 21 19:22:50 crc kubenswrapper[4701]: I1121 19:22:50.950560 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/66b0eb91-a81c-4a68-8e9d-a1042d709e80-nova-metadata-tls-certs\") pod \"66b0eb91-a81c-4a68-8e9d-a1042d709e80\" (UID: \"66b0eb91-a81c-4a68-8e9d-a1042d709e80\") " Nov 21 19:22:50 crc kubenswrapper[4701]: I1121 19:22:50.950631 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66b0eb91-a81c-4a68-8e9d-a1042d709e80-combined-ca-bundle\") pod \"66b0eb91-a81c-4a68-8e9d-a1042d709e80\" (UID: \"66b0eb91-a81c-4a68-8e9d-a1042d709e80\") " Nov 21 19:22:50 crc kubenswrapper[4701]: I1121 19:22:50.953162 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/66b0eb91-a81c-4a68-8e9d-a1042d709e80-logs" 
(OuterVolumeSpecName: "logs") pod "66b0eb91-a81c-4a68-8e9d-a1042d709e80" (UID: "66b0eb91-a81c-4a68-8e9d-a1042d709e80"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:22:50 crc kubenswrapper[4701]: I1121 19:22:50.969975 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66b0eb91-a81c-4a68-8e9d-a1042d709e80-kube-api-access-p7q5m" (OuterVolumeSpecName: "kube-api-access-p7q5m") pod "66b0eb91-a81c-4a68-8e9d-a1042d709e80" (UID: "66b0eb91-a81c-4a68-8e9d-a1042d709e80"). InnerVolumeSpecName "kube-api-access-p7q5m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.000430 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66b0eb91-a81c-4a68-8e9d-a1042d709e80-config-data" (OuterVolumeSpecName: "config-data") pod "66b0eb91-a81c-4a68-8e9d-a1042d709e80" (UID: "66b0eb91-a81c-4a68-8e9d-a1042d709e80"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.007272 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66b0eb91-a81c-4a68-8e9d-a1042d709e80-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "66b0eb91-a81c-4a68-8e9d-a1042d709e80" (UID: "66b0eb91-a81c-4a68-8e9d-a1042d709e80"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.045109 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66b0eb91-a81c-4a68-8e9d-a1042d709e80-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "66b0eb91-a81c-4a68-8e9d-a1042d709e80" (UID: "66b0eb91-a81c-4a68-8e9d-a1042d709e80"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.056021 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66b0eb91-a81c-4a68-8e9d-a1042d709e80-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.056066 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p7q5m\" (UniqueName: \"kubernetes.io/projected/66b0eb91-a81c-4a68-8e9d-a1042d709e80-kube-api-access-p7q5m\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.056080 4701 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/66b0eb91-a81c-4a68-8e9d-a1042d709e80-logs\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.056093 4701 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/66b0eb91-a81c-4a68-8e9d-a1042d709e80-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.056106 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66b0eb91-a81c-4a68-8e9d-a1042d709e80-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.136576 4701 generic.go:334] "Generic (PLEG): container finished" podID="9a5a04ce-27e1-4099-831f-aa3494125d7b" containerID="6a9a4eda5c857b53227bfd37554da4e92e383f24e58ef0b8052a51c0e27ab5f6" exitCode=0 Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.136628 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9a5a04ce-27e1-4099-831f-aa3494125d7b","Type":"ContainerDied","Data":"6a9a4eda5c857b53227bfd37554da4e92e383f24e58ef0b8052a51c0e27ab5f6"} Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.138128 4701 generic.go:334] "Generic (PLEG): container finished" podID="66b0eb91-a81c-4a68-8e9d-a1042d709e80" containerID="4336a6cf3798ec22e54d0ce87ce0f7a7e04b646adceb0048a6ffd573f71f4f4a" exitCode=0 Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.138153 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"66b0eb91-a81c-4a68-8e9d-a1042d709e80","Type":"ContainerDied","Data":"4336a6cf3798ec22e54d0ce87ce0f7a7e04b646adceb0048a6ffd573f71f4f4a"} Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.138169 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"66b0eb91-a81c-4a68-8e9d-a1042d709e80","Type":"ContainerDied","Data":"e3ca9d4d7a720b20b5f9744318d11b7eaae17fff73a46b7e2b0a33f8ae26f4b1"} Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.138191 4701 scope.go:117] "RemoveContainer" containerID="4336a6cf3798ec22e54d0ce87ce0f7a7e04b646adceb0048a6ffd573f71f4f4a" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.138351 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.180069 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.180732 4701 scope.go:117] "RemoveContainer" containerID="d7be4db370d7b69d26fc12236bf4a82631413dfee073cf0cd71ebc33eb4d3b6a" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.196445 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.215697 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 21 19:22:51 crc kubenswrapper[4701]: E1121 19:22:51.216190 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9e1aa13-3797-4779-b94f-f39f2e4fcabd" containerName="init" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.216226 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9e1aa13-3797-4779-b94f-f39f2e4fcabd" containerName="init" Nov 21 19:22:51 crc kubenswrapper[4701]: E1121 19:22:51.216240 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f37895e6-74f4-4d54-a0b8-232c5ba5c018" containerName="nova-manage" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.216248 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="f37895e6-74f4-4d54-a0b8-232c5ba5c018" containerName="nova-manage" Nov 21 19:22:51 crc kubenswrapper[4701]: E1121 19:22:51.216269 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9e1aa13-3797-4779-b94f-f39f2e4fcabd" containerName="dnsmasq-dns" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.216276 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9e1aa13-3797-4779-b94f-f39f2e4fcabd" containerName="dnsmasq-dns" Nov 21 19:22:51 crc kubenswrapper[4701]: E1121 19:22:51.216287 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66b0eb91-a81c-4a68-8e9d-a1042d709e80" containerName="nova-metadata-log" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.216295 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="66b0eb91-a81c-4a68-8e9d-a1042d709e80" containerName="nova-metadata-log" Nov 21 19:22:51 crc kubenswrapper[4701]: E1121 19:22:51.216324 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66b0eb91-a81c-4a68-8e9d-a1042d709e80" containerName="nova-metadata-metadata" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.216331 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="66b0eb91-a81c-4a68-8e9d-a1042d709e80" containerName="nova-metadata-metadata" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.216514 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9e1aa13-3797-4779-b94f-f39f2e4fcabd" containerName="dnsmasq-dns" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.216532 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="66b0eb91-a81c-4a68-8e9d-a1042d709e80" containerName="nova-metadata-metadata" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.216549 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="66b0eb91-a81c-4a68-8e9d-a1042d709e80" containerName="nova-metadata-log" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.216559 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="f37895e6-74f4-4d54-a0b8-232c5ba5c018" containerName="nova-manage" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.217878 4701 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.221013 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.221272 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.225324 4701 scope.go:117] "RemoveContainer" containerID="4336a6cf3798ec22e54d0ce87ce0f7a7e04b646adceb0048a6ffd573f71f4f4a" Nov 21 19:22:51 crc kubenswrapper[4701]: E1121 19:22:51.225962 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4336a6cf3798ec22e54d0ce87ce0f7a7e04b646adceb0048a6ffd573f71f4f4a\": container with ID starting with 4336a6cf3798ec22e54d0ce87ce0f7a7e04b646adceb0048a6ffd573f71f4f4a not found: ID does not exist" containerID="4336a6cf3798ec22e54d0ce87ce0f7a7e04b646adceb0048a6ffd573f71f4f4a" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.226012 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4336a6cf3798ec22e54d0ce87ce0f7a7e04b646adceb0048a6ffd573f71f4f4a"} err="failed to get container status \"4336a6cf3798ec22e54d0ce87ce0f7a7e04b646adceb0048a6ffd573f71f4f4a\": rpc error: code = NotFound desc = could not find container \"4336a6cf3798ec22e54d0ce87ce0f7a7e04b646adceb0048a6ffd573f71f4f4a\": container with ID starting with 4336a6cf3798ec22e54d0ce87ce0f7a7e04b646adceb0048a6ffd573f71f4f4a not found: ID does not exist" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.226045 4701 scope.go:117] "RemoveContainer" containerID="d7be4db370d7b69d26fc12236bf4a82631413dfee073cf0cd71ebc33eb4d3b6a" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.228569 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 19:22:51 crc kubenswrapper[4701]: E1121 19:22:51.229138 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d7be4db370d7b69d26fc12236bf4a82631413dfee073cf0cd71ebc33eb4d3b6a\": container with ID starting with d7be4db370d7b69d26fc12236bf4a82631413dfee073cf0cd71ebc33eb4d3b6a not found: ID does not exist" containerID="d7be4db370d7b69d26fc12236bf4a82631413dfee073cf0cd71ebc33eb4d3b6a" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.229418 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d7be4db370d7b69d26fc12236bf4a82631413dfee073cf0cd71ebc33eb4d3b6a"} err="failed to get container status \"d7be4db370d7b69d26fc12236bf4a82631413dfee073cf0cd71ebc33eb4d3b6a\": rpc error: code = NotFound desc = could not find container \"d7be4db370d7b69d26fc12236bf4a82631413dfee073cf0cd71ebc33eb4d3b6a\": container with ID starting with d7be4db370d7b69d26fc12236bf4a82631413dfee073cf0cd71ebc33eb4d3b6a not found: ID does not exist" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.259346 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e2f4186-103f-4356-8b8a-80a07cde4ac4-config-data\") pod \"nova-metadata-0\" (UID: \"3e2f4186-103f-4356-8b8a-80a07cde4ac4\") " pod="openstack/nova-metadata-0" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.259392 4701 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3e2f4186-103f-4356-8b8a-80a07cde4ac4-logs\") pod \"nova-metadata-0\" (UID: \"3e2f4186-103f-4356-8b8a-80a07cde4ac4\") " pod="openstack/nova-metadata-0" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.259426 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/3e2f4186-103f-4356-8b8a-80a07cde4ac4-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"3e2f4186-103f-4356-8b8a-80a07cde4ac4\") " pod="openstack/nova-metadata-0" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.259697 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6khhv\" (UniqueName: \"kubernetes.io/projected/3e2f4186-103f-4356-8b8a-80a07cde4ac4-kube-api-access-6khhv\") pod \"nova-metadata-0\" (UID: \"3e2f4186-103f-4356-8b8a-80a07cde4ac4\") " pod="openstack/nova-metadata-0" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.259938 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e2f4186-103f-4356-8b8a-80a07cde4ac4-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"3e2f4186-103f-4356-8b8a-80a07cde4ac4\") " pod="openstack/nova-metadata-0" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.363150 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e2f4186-103f-4356-8b8a-80a07cde4ac4-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"3e2f4186-103f-4356-8b8a-80a07cde4ac4\") " pod="openstack/nova-metadata-0" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.363378 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e2f4186-103f-4356-8b8a-80a07cde4ac4-config-data\") pod \"nova-metadata-0\" (UID: \"3e2f4186-103f-4356-8b8a-80a07cde4ac4\") " pod="openstack/nova-metadata-0" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.363442 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3e2f4186-103f-4356-8b8a-80a07cde4ac4-logs\") pod \"nova-metadata-0\" (UID: \"3e2f4186-103f-4356-8b8a-80a07cde4ac4\") " pod="openstack/nova-metadata-0" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.363550 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/3e2f4186-103f-4356-8b8a-80a07cde4ac4-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"3e2f4186-103f-4356-8b8a-80a07cde4ac4\") " pod="openstack/nova-metadata-0" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.363650 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6khhv\" (UniqueName: \"kubernetes.io/projected/3e2f4186-103f-4356-8b8a-80a07cde4ac4-kube-api-access-6khhv\") pod \"nova-metadata-0\" (UID: \"3e2f4186-103f-4356-8b8a-80a07cde4ac4\") " pod="openstack/nova-metadata-0" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.364799 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3e2f4186-103f-4356-8b8a-80a07cde4ac4-logs\") pod \"nova-metadata-0\" (UID: 
\"3e2f4186-103f-4356-8b8a-80a07cde4ac4\") " pod="openstack/nova-metadata-0" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.367424 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e2f4186-103f-4356-8b8a-80a07cde4ac4-config-data\") pod \"nova-metadata-0\" (UID: \"3e2f4186-103f-4356-8b8a-80a07cde4ac4\") " pod="openstack/nova-metadata-0" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.367562 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/3e2f4186-103f-4356-8b8a-80a07cde4ac4-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"3e2f4186-103f-4356-8b8a-80a07cde4ac4\") " pod="openstack/nova-metadata-0" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.368937 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e2f4186-103f-4356-8b8a-80a07cde4ac4-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"3e2f4186-103f-4356-8b8a-80a07cde4ac4\") " pod="openstack/nova-metadata-0" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.385664 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6khhv\" (UniqueName: \"kubernetes.io/projected/3e2f4186-103f-4356-8b8a-80a07cde4ac4-kube-api-access-6khhv\") pod \"nova-metadata-0\" (UID: \"3e2f4186-103f-4356-8b8a-80a07cde4ac4\") " pod="openstack/nova-metadata-0" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.412274 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.555503 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.579741 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9a5a04ce-27e1-4099-831f-aa3494125d7b-config-data\") pod \"9a5a04ce-27e1-4099-831f-aa3494125d7b\" (UID: \"9a5a04ce-27e1-4099-831f-aa3494125d7b\") " Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.579805 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9a5a04ce-27e1-4099-831f-aa3494125d7b-internal-tls-certs\") pod \"9a5a04ce-27e1-4099-831f-aa3494125d7b\" (UID: \"9a5a04ce-27e1-4099-831f-aa3494125d7b\") " Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.579910 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9a5a04ce-27e1-4099-831f-aa3494125d7b-logs\") pod \"9a5a04ce-27e1-4099-831f-aa3494125d7b\" (UID: \"9a5a04ce-27e1-4099-831f-aa3494125d7b\") " Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.580059 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9a5a04ce-27e1-4099-831f-aa3494125d7b-public-tls-certs\") pod \"9a5a04ce-27e1-4099-831f-aa3494125d7b\" (UID: \"9a5a04ce-27e1-4099-831f-aa3494125d7b\") " Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.580125 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a5a04ce-27e1-4099-831f-aa3494125d7b-combined-ca-bundle\") pod \"9a5a04ce-27e1-4099-831f-aa3494125d7b\" (UID: \"9a5a04ce-27e1-4099-831f-aa3494125d7b\") " Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.580242 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xz4tv\" (UniqueName: \"kubernetes.io/projected/9a5a04ce-27e1-4099-831f-aa3494125d7b-kube-api-access-xz4tv\") pod \"9a5a04ce-27e1-4099-831f-aa3494125d7b\" (UID: \"9a5a04ce-27e1-4099-831f-aa3494125d7b\") " Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.581570 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a5a04ce-27e1-4099-831f-aa3494125d7b-logs" (OuterVolumeSpecName: "logs") pod "9a5a04ce-27e1-4099-831f-aa3494125d7b" (UID: "9a5a04ce-27e1-4099-831f-aa3494125d7b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.584591 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a5a04ce-27e1-4099-831f-aa3494125d7b-kube-api-access-xz4tv" (OuterVolumeSpecName: "kube-api-access-xz4tv") pod "9a5a04ce-27e1-4099-831f-aa3494125d7b" (UID: "9a5a04ce-27e1-4099-831f-aa3494125d7b"). InnerVolumeSpecName "kube-api-access-xz4tv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.626352 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9a5a04ce-27e1-4099-831f-aa3494125d7b-config-data" (OuterVolumeSpecName: "config-data") pod "9a5a04ce-27e1-4099-831f-aa3494125d7b" (UID: "9a5a04ce-27e1-4099-831f-aa3494125d7b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.626426 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9a5a04ce-27e1-4099-831f-aa3494125d7b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9a5a04ce-27e1-4099-831f-aa3494125d7b" (UID: "9a5a04ce-27e1-4099-831f-aa3494125d7b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.648945 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9a5a04ce-27e1-4099-831f-aa3494125d7b-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "9a5a04ce-27e1-4099-831f-aa3494125d7b" (UID: "9a5a04ce-27e1-4099-831f-aa3494125d7b"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.679523 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9a5a04ce-27e1-4099-831f-aa3494125d7b-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "9a5a04ce-27e1-4099-831f-aa3494125d7b" (UID: "9a5a04ce-27e1-4099-831f-aa3494125d7b"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.683756 4701 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9a5a04ce-27e1-4099-831f-aa3494125d7b-logs\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.683806 4701 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9a5a04ce-27e1-4099-831f-aa3494125d7b-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.683817 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a5a04ce-27e1-4099-831f-aa3494125d7b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.683828 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xz4tv\" (UniqueName: \"kubernetes.io/projected/9a5a04ce-27e1-4099-831f-aa3494125d7b-kube-api-access-xz4tv\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.683839 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9a5a04ce-27e1-4099-831f-aa3494125d7b-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.683856 4701 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9a5a04ce-27e1-4099-831f-aa3494125d7b-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:51 crc kubenswrapper[4701]: I1121 19:22:51.967838 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="66b0eb91-a81c-4a68-8e9d-a1042d709e80" path="/var/lib/kubelet/pods/66b0eb91-a81c-4a68-8e9d-a1042d709e80/volumes" Nov 21 19:22:52 crc kubenswrapper[4701]: I1121 19:22:52.142515 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 19:22:52 crc kubenswrapper[4701]: W1121 19:22:52.162777 4701 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3e2f4186_103f_4356_8b8a_80a07cde4ac4.slice/crio-cc5bb267a9decdbf74d68f8ed19b670ce4804bbb9eef080dc4a38d6b1121892e WatchSource:0}: Error finding container cc5bb267a9decdbf74d68f8ed19b670ce4804bbb9eef080dc4a38d6b1121892e: Status 404 returned error can't find the container with id cc5bb267a9decdbf74d68f8ed19b670ce4804bbb9eef080dc4a38d6b1121892e Nov 21 19:22:52 crc kubenswrapper[4701]: I1121 19:22:52.162985 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9a5a04ce-27e1-4099-831f-aa3494125d7b","Type":"ContainerDied","Data":"1c1c3815fdff4ae338997415b6b2802cfeb5ab5e395473b4056e2b6bdd336997"} Nov 21 19:22:52 crc kubenswrapper[4701]: I1121 19:22:52.163069 4701 scope.go:117] "RemoveContainer" containerID="6a9a4eda5c857b53227bfd37554da4e92e383f24e58ef0b8052a51c0e27ab5f6" Nov 21 19:22:52 crc kubenswrapper[4701]: I1121 19:22:52.163080 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 21 19:22:52 crc kubenswrapper[4701]: I1121 19:22:52.232011 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 21 19:22:52 crc kubenswrapper[4701]: I1121 19:22:52.234551 4701 scope.go:117] "RemoveContainer" containerID="397fdc3497ee13248f8b9a7ac5250d7df9b7a80eb6b6ffff0e59de7fe4f295cd" Nov 21 19:22:52 crc kubenswrapper[4701]: I1121 19:22:52.243804 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 21 19:22:52 crc kubenswrapper[4701]: I1121 19:22:52.266921 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 21 19:22:52 crc kubenswrapper[4701]: E1121 19:22:52.277726 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a5a04ce-27e1-4099-831f-aa3494125d7b" containerName="nova-api-api" Nov 21 19:22:52 crc kubenswrapper[4701]: I1121 19:22:52.277751 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a5a04ce-27e1-4099-831f-aa3494125d7b" containerName="nova-api-api" Nov 21 19:22:52 crc kubenswrapper[4701]: E1121 19:22:52.277779 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a5a04ce-27e1-4099-831f-aa3494125d7b" containerName="nova-api-log" Nov 21 19:22:52 crc kubenswrapper[4701]: I1121 19:22:52.277786 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a5a04ce-27e1-4099-831f-aa3494125d7b" containerName="nova-api-log" Nov 21 19:22:52 crc kubenswrapper[4701]: I1121 19:22:52.278266 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a5a04ce-27e1-4099-831f-aa3494125d7b" containerName="nova-api-log" Nov 21 19:22:52 crc kubenswrapper[4701]: I1121 19:22:52.278282 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a5a04ce-27e1-4099-831f-aa3494125d7b" containerName="nova-api-api" Nov 21 19:22:52 crc kubenswrapper[4701]: I1121 19:22:52.280612 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 21 19:22:52 crc kubenswrapper[4701]: I1121 19:22:52.285092 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 21 19:22:52 crc kubenswrapper[4701]: I1121 19:22:52.285529 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 21 19:22:52 crc kubenswrapper[4701]: I1121 19:22:52.285702 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 21 19:22:52 crc kubenswrapper[4701]: I1121 19:22:52.301126 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 21 19:22:52 crc kubenswrapper[4701]: I1121 19:22:52.318269 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7dfe7de3-4ade-4a2e-8826-be286e416d33-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"7dfe7de3-4ade-4a2e-8826-be286e416d33\") " pod="openstack/nova-api-0" Nov 21 19:22:52 crc kubenswrapper[4701]: I1121 19:22:52.318381 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7dfe7de3-4ade-4a2e-8826-be286e416d33-internal-tls-certs\") pod \"nova-api-0\" (UID: \"7dfe7de3-4ade-4a2e-8826-be286e416d33\") " pod="openstack/nova-api-0" Nov 21 19:22:52 crc kubenswrapper[4701]: I1121 19:22:52.318445 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rrztr\" (UniqueName: \"kubernetes.io/projected/7dfe7de3-4ade-4a2e-8826-be286e416d33-kube-api-access-rrztr\") pod \"nova-api-0\" (UID: \"7dfe7de3-4ade-4a2e-8826-be286e416d33\") " pod="openstack/nova-api-0" Nov 21 19:22:52 crc kubenswrapper[4701]: I1121 19:22:52.318518 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7dfe7de3-4ade-4a2e-8826-be286e416d33-logs\") pod \"nova-api-0\" (UID: \"7dfe7de3-4ade-4a2e-8826-be286e416d33\") " pod="openstack/nova-api-0" Nov 21 19:22:52 crc kubenswrapper[4701]: I1121 19:22:52.318583 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7dfe7de3-4ade-4a2e-8826-be286e416d33-config-data\") pod \"nova-api-0\" (UID: \"7dfe7de3-4ade-4a2e-8826-be286e416d33\") " pod="openstack/nova-api-0" Nov 21 19:22:52 crc kubenswrapper[4701]: I1121 19:22:52.318623 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7dfe7de3-4ade-4a2e-8826-be286e416d33-public-tls-certs\") pod \"nova-api-0\" (UID: \"7dfe7de3-4ade-4a2e-8826-be286e416d33\") " pod="openstack/nova-api-0" Nov 21 19:22:52 crc kubenswrapper[4701]: I1121 19:22:52.420084 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7dfe7de3-4ade-4a2e-8826-be286e416d33-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"7dfe7de3-4ade-4a2e-8826-be286e416d33\") " pod="openstack/nova-api-0" Nov 21 19:22:52 crc kubenswrapper[4701]: I1121 19:22:52.420130 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7dfe7de3-4ade-4a2e-8826-be286e416d33-internal-tls-certs\") pod 
\"nova-api-0\" (UID: \"7dfe7de3-4ade-4a2e-8826-be286e416d33\") " pod="openstack/nova-api-0" Nov 21 19:22:52 crc kubenswrapper[4701]: I1121 19:22:52.420161 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rrztr\" (UniqueName: \"kubernetes.io/projected/7dfe7de3-4ade-4a2e-8826-be286e416d33-kube-api-access-rrztr\") pod \"nova-api-0\" (UID: \"7dfe7de3-4ade-4a2e-8826-be286e416d33\") " pod="openstack/nova-api-0" Nov 21 19:22:52 crc kubenswrapper[4701]: I1121 19:22:52.420191 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7dfe7de3-4ade-4a2e-8826-be286e416d33-logs\") pod \"nova-api-0\" (UID: \"7dfe7de3-4ade-4a2e-8826-be286e416d33\") " pod="openstack/nova-api-0" Nov 21 19:22:52 crc kubenswrapper[4701]: I1121 19:22:52.420234 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7dfe7de3-4ade-4a2e-8826-be286e416d33-config-data\") pod \"nova-api-0\" (UID: \"7dfe7de3-4ade-4a2e-8826-be286e416d33\") " pod="openstack/nova-api-0" Nov 21 19:22:52 crc kubenswrapper[4701]: I1121 19:22:52.420256 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7dfe7de3-4ade-4a2e-8826-be286e416d33-public-tls-certs\") pod \"nova-api-0\" (UID: \"7dfe7de3-4ade-4a2e-8826-be286e416d33\") " pod="openstack/nova-api-0" Nov 21 19:22:52 crc kubenswrapper[4701]: I1121 19:22:52.420761 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7dfe7de3-4ade-4a2e-8826-be286e416d33-logs\") pod \"nova-api-0\" (UID: \"7dfe7de3-4ade-4a2e-8826-be286e416d33\") " pod="openstack/nova-api-0" Nov 21 19:22:52 crc kubenswrapper[4701]: I1121 19:22:52.424353 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7dfe7de3-4ade-4a2e-8826-be286e416d33-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"7dfe7de3-4ade-4a2e-8826-be286e416d33\") " pod="openstack/nova-api-0" Nov 21 19:22:52 crc kubenswrapper[4701]: I1121 19:22:52.425061 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7dfe7de3-4ade-4a2e-8826-be286e416d33-public-tls-certs\") pod \"nova-api-0\" (UID: \"7dfe7de3-4ade-4a2e-8826-be286e416d33\") " pod="openstack/nova-api-0" Nov 21 19:22:52 crc kubenswrapper[4701]: I1121 19:22:52.425058 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7dfe7de3-4ade-4a2e-8826-be286e416d33-config-data\") pod \"nova-api-0\" (UID: \"7dfe7de3-4ade-4a2e-8826-be286e416d33\") " pod="openstack/nova-api-0" Nov 21 19:22:52 crc kubenswrapper[4701]: I1121 19:22:52.439777 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7dfe7de3-4ade-4a2e-8826-be286e416d33-internal-tls-certs\") pod \"nova-api-0\" (UID: \"7dfe7de3-4ade-4a2e-8826-be286e416d33\") " pod="openstack/nova-api-0" Nov 21 19:22:52 crc kubenswrapper[4701]: I1121 19:22:52.441400 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rrztr\" (UniqueName: \"kubernetes.io/projected/7dfe7de3-4ade-4a2e-8826-be286e416d33-kube-api-access-rrztr\") pod \"nova-api-0\" (UID: \"7dfe7de3-4ade-4a2e-8826-be286e416d33\") " 
pod="openstack/nova-api-0" Nov 21 19:22:52 crc kubenswrapper[4701]: I1121 19:22:52.620292 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 21 19:22:53 crc kubenswrapper[4701]: I1121 19:22:53.179347 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 21 19:22:53 crc kubenswrapper[4701]: I1121 19:22:53.181868 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"3e2f4186-103f-4356-8b8a-80a07cde4ac4","Type":"ContainerStarted","Data":"0f130aba293740cb8f46382fb6f9612e5de8c51882b3e3bb94548dc799529b13"} Nov 21 19:22:53 crc kubenswrapper[4701]: I1121 19:22:53.181937 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"3e2f4186-103f-4356-8b8a-80a07cde4ac4","Type":"ContainerStarted","Data":"df42ac79390e80c2c23a6781fc4ebe22331ff7e383380142065ca007225981e3"} Nov 21 19:22:53 crc kubenswrapper[4701]: I1121 19:22:53.181959 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"3e2f4186-103f-4356-8b8a-80a07cde4ac4","Type":"ContainerStarted","Data":"cc5bb267a9decdbf74d68f8ed19b670ce4804bbb9eef080dc4a38d6b1121892e"} Nov 21 19:22:53 crc kubenswrapper[4701]: I1121 19:22:53.210414 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.210377055 podStartE2EDuration="2.210377055s" podCreationTimestamp="2025-11-21 19:22:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:22:53.208361521 +0000 UTC m=+1263.993501548" watchObservedRunningTime="2025-11-21 19:22:53.210377055 +0000 UTC m=+1263.995517122" Nov 21 19:22:53 crc kubenswrapper[4701]: I1121 19:22:53.964936 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a5a04ce-27e1-4099-831f-aa3494125d7b" path="/var/lib/kubelet/pods/9a5a04ce-27e1-4099-831f-aa3494125d7b/volumes" Nov 21 19:22:54 crc kubenswrapper[4701]: I1121 19:22:54.203505 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7dfe7de3-4ade-4a2e-8826-be286e416d33","Type":"ContainerStarted","Data":"e4fed9725f2bf512d5cbcd725fd78ac15b7ed06d541b5bc49848f66bf8fac354"} Nov 21 19:22:54 crc kubenswrapper[4701]: I1121 19:22:54.205097 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7dfe7de3-4ade-4a2e-8826-be286e416d33","Type":"ContainerStarted","Data":"23f318db12b0cc2667f73ed1f4ff5a230a96308409f795c0948878143f1325d2"} Nov 21 19:22:54 crc kubenswrapper[4701]: I1121 19:22:54.205126 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7dfe7de3-4ade-4a2e-8826-be286e416d33","Type":"ContainerStarted","Data":"58e96da2d9d1b4c1c9c6985e22bb892410ec294047a454fb613cd543c81d6d51"} Nov 21 19:22:54 crc kubenswrapper[4701]: I1121 19:22:54.249258 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.2491872 podStartE2EDuration="2.2491872s" podCreationTimestamp="2025-11-21 19:22:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:22:54.232030255 +0000 UTC m=+1265.017170312" watchObservedRunningTime="2025-11-21 19:22:54.2491872 +0000 UTC m=+1265.034327267" Nov 21 19:22:54 crc kubenswrapper[4701]: I1121 19:22:54.807314 
4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 21 19:22:54 crc kubenswrapper[4701]: I1121 19:22:54.922419 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dfec6afb-55e4-4efa-8e87-4f68937b2672-config-data\") pod \"dfec6afb-55e4-4efa-8e87-4f68937b2672\" (UID: \"dfec6afb-55e4-4efa-8e87-4f68937b2672\") " Nov 21 19:22:54 crc kubenswrapper[4701]: I1121 19:22:54.922530 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dfec6afb-55e4-4efa-8e87-4f68937b2672-combined-ca-bundle\") pod \"dfec6afb-55e4-4efa-8e87-4f68937b2672\" (UID: \"dfec6afb-55e4-4efa-8e87-4f68937b2672\") " Nov 21 19:22:54 crc kubenswrapper[4701]: I1121 19:22:54.922610 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxrxq\" (UniqueName: \"kubernetes.io/projected/dfec6afb-55e4-4efa-8e87-4f68937b2672-kube-api-access-wxrxq\") pod \"dfec6afb-55e4-4efa-8e87-4f68937b2672\" (UID: \"dfec6afb-55e4-4efa-8e87-4f68937b2672\") " Nov 21 19:22:54 crc kubenswrapper[4701]: I1121 19:22:54.931589 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dfec6afb-55e4-4efa-8e87-4f68937b2672-kube-api-access-wxrxq" (OuterVolumeSpecName: "kube-api-access-wxrxq") pod "dfec6afb-55e4-4efa-8e87-4f68937b2672" (UID: "dfec6afb-55e4-4efa-8e87-4f68937b2672"). InnerVolumeSpecName "kube-api-access-wxrxq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:22:54 crc kubenswrapper[4701]: I1121 19:22:54.953500 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dfec6afb-55e4-4efa-8e87-4f68937b2672-config-data" (OuterVolumeSpecName: "config-data") pod "dfec6afb-55e4-4efa-8e87-4f68937b2672" (UID: "dfec6afb-55e4-4efa-8e87-4f68937b2672"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:54 crc kubenswrapper[4701]: I1121 19:22:54.954167 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dfec6afb-55e4-4efa-8e87-4f68937b2672-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dfec6afb-55e4-4efa-8e87-4f68937b2672" (UID: "dfec6afb-55e4-4efa-8e87-4f68937b2672"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:22:55 crc kubenswrapper[4701]: I1121 19:22:55.025094 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dfec6afb-55e4-4efa-8e87-4f68937b2672-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:55 crc kubenswrapper[4701]: I1121 19:22:55.025126 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dfec6afb-55e4-4efa-8e87-4f68937b2672-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:55 crc kubenswrapper[4701]: I1121 19:22:55.025137 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxrxq\" (UniqueName: \"kubernetes.io/projected/dfec6afb-55e4-4efa-8e87-4f68937b2672-kube-api-access-wxrxq\") on node \"crc\" DevicePath \"\"" Nov 21 19:22:55 crc kubenswrapper[4701]: I1121 19:22:55.216590 4701 generic.go:334] "Generic (PLEG): container finished" podID="dfec6afb-55e4-4efa-8e87-4f68937b2672" containerID="217c7063a1e554974ad75c90ab018bb48a6c866b14bde9fd66e948fa92c36238" exitCode=0 Nov 21 19:22:55 crc kubenswrapper[4701]: I1121 19:22:55.216646 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 21 19:22:55 crc kubenswrapper[4701]: I1121 19:22:55.216769 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"dfec6afb-55e4-4efa-8e87-4f68937b2672","Type":"ContainerDied","Data":"217c7063a1e554974ad75c90ab018bb48a6c866b14bde9fd66e948fa92c36238"} Nov 21 19:22:55 crc kubenswrapper[4701]: I1121 19:22:55.216854 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"dfec6afb-55e4-4efa-8e87-4f68937b2672","Type":"ContainerDied","Data":"74ce980ba75b948c25143a2b1ad2b7c6ddd7d08054844b3c74bf034a92fc9ec9"} Nov 21 19:22:55 crc kubenswrapper[4701]: I1121 19:22:55.216886 4701 scope.go:117] "RemoveContainer" containerID="217c7063a1e554974ad75c90ab018bb48a6c866b14bde9fd66e948fa92c36238" Nov 21 19:22:55 crc kubenswrapper[4701]: I1121 19:22:55.257793 4701 scope.go:117] "RemoveContainer" containerID="217c7063a1e554974ad75c90ab018bb48a6c866b14bde9fd66e948fa92c36238" Nov 21 19:22:55 crc kubenswrapper[4701]: E1121 19:22:55.260567 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"217c7063a1e554974ad75c90ab018bb48a6c866b14bde9fd66e948fa92c36238\": container with ID starting with 217c7063a1e554974ad75c90ab018bb48a6c866b14bde9fd66e948fa92c36238 not found: ID does not exist" containerID="217c7063a1e554974ad75c90ab018bb48a6c866b14bde9fd66e948fa92c36238" Nov 21 19:22:55 crc kubenswrapper[4701]: I1121 19:22:55.260603 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"217c7063a1e554974ad75c90ab018bb48a6c866b14bde9fd66e948fa92c36238"} err="failed to get container status \"217c7063a1e554974ad75c90ab018bb48a6c866b14bde9fd66e948fa92c36238\": rpc error: code = NotFound desc = could not find container \"217c7063a1e554974ad75c90ab018bb48a6c866b14bde9fd66e948fa92c36238\": container with ID starting with 217c7063a1e554974ad75c90ab018bb48a6c866b14bde9fd66e948fa92c36238 not found: ID does not exist" Nov 21 19:22:55 crc kubenswrapper[4701]: I1121 19:22:55.266258 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 19:22:55 crc kubenswrapper[4701]: I1121 19:22:55.282577 4701 kubelet.go:2431] 
"SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 19:22:55 crc kubenswrapper[4701]: I1121 19:22:55.293804 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 19:22:55 crc kubenswrapper[4701]: E1121 19:22:55.294400 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dfec6afb-55e4-4efa-8e87-4f68937b2672" containerName="nova-scheduler-scheduler" Nov 21 19:22:55 crc kubenswrapper[4701]: I1121 19:22:55.294429 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="dfec6afb-55e4-4efa-8e87-4f68937b2672" containerName="nova-scheduler-scheduler" Nov 21 19:22:55 crc kubenswrapper[4701]: I1121 19:22:55.294720 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="dfec6afb-55e4-4efa-8e87-4f68937b2672" containerName="nova-scheduler-scheduler" Nov 21 19:22:55 crc kubenswrapper[4701]: I1121 19:22:55.295622 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 21 19:22:55 crc kubenswrapper[4701]: I1121 19:22:55.299409 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 21 19:22:55 crc kubenswrapper[4701]: I1121 19:22:55.315002 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 19:22:55 crc kubenswrapper[4701]: I1121 19:22:55.332017 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3df472cf-6795-4ca8-908b-01f824bf4b5e-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"3df472cf-6795-4ca8-908b-01f824bf4b5e\") " pod="openstack/nova-scheduler-0" Nov 21 19:22:55 crc kubenswrapper[4701]: I1121 19:22:55.332421 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3df472cf-6795-4ca8-908b-01f824bf4b5e-config-data\") pod \"nova-scheduler-0\" (UID: \"3df472cf-6795-4ca8-908b-01f824bf4b5e\") " pod="openstack/nova-scheduler-0" Nov 21 19:22:55 crc kubenswrapper[4701]: I1121 19:22:55.332502 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qmclp\" (UniqueName: \"kubernetes.io/projected/3df472cf-6795-4ca8-908b-01f824bf4b5e-kube-api-access-qmclp\") pod \"nova-scheduler-0\" (UID: \"3df472cf-6795-4ca8-908b-01f824bf4b5e\") " pod="openstack/nova-scheduler-0" Nov 21 19:22:55 crc kubenswrapper[4701]: I1121 19:22:55.434946 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3df472cf-6795-4ca8-908b-01f824bf4b5e-config-data\") pod \"nova-scheduler-0\" (UID: \"3df472cf-6795-4ca8-908b-01f824bf4b5e\") " pod="openstack/nova-scheduler-0" Nov 21 19:22:55 crc kubenswrapper[4701]: I1121 19:22:55.435130 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qmclp\" (UniqueName: \"kubernetes.io/projected/3df472cf-6795-4ca8-908b-01f824bf4b5e-kube-api-access-qmclp\") pod \"nova-scheduler-0\" (UID: \"3df472cf-6795-4ca8-908b-01f824bf4b5e\") " pod="openstack/nova-scheduler-0" Nov 21 19:22:55 crc kubenswrapper[4701]: I1121 19:22:55.435259 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3df472cf-6795-4ca8-908b-01f824bf4b5e-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: 
\"3df472cf-6795-4ca8-908b-01f824bf4b5e\") " pod="openstack/nova-scheduler-0" Nov 21 19:22:55 crc kubenswrapper[4701]: I1121 19:22:55.440854 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3df472cf-6795-4ca8-908b-01f824bf4b5e-config-data\") pod \"nova-scheduler-0\" (UID: \"3df472cf-6795-4ca8-908b-01f824bf4b5e\") " pod="openstack/nova-scheduler-0" Nov 21 19:22:55 crc kubenswrapper[4701]: I1121 19:22:55.448149 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3df472cf-6795-4ca8-908b-01f824bf4b5e-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"3df472cf-6795-4ca8-908b-01f824bf4b5e\") " pod="openstack/nova-scheduler-0" Nov 21 19:22:55 crc kubenswrapper[4701]: I1121 19:22:55.467238 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qmclp\" (UniqueName: \"kubernetes.io/projected/3df472cf-6795-4ca8-908b-01f824bf4b5e-kube-api-access-qmclp\") pod \"nova-scheduler-0\" (UID: \"3df472cf-6795-4ca8-908b-01f824bf4b5e\") " pod="openstack/nova-scheduler-0" Nov 21 19:22:55 crc kubenswrapper[4701]: I1121 19:22:55.619607 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 21 19:22:55 crc kubenswrapper[4701]: I1121 19:22:55.969856 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dfec6afb-55e4-4efa-8e87-4f68937b2672" path="/var/lib/kubelet/pods/dfec6afb-55e4-4efa-8e87-4f68937b2672/volumes" Nov 21 19:22:56 crc kubenswrapper[4701]: I1121 19:22:56.136828 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 19:22:56 crc kubenswrapper[4701]: W1121 19:22:56.147207 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3df472cf_6795_4ca8_908b_01f824bf4b5e.slice/crio-c5cbcf387ad968543ebab0392b5b292bf1d498a5d92cad7003dc6ca04eca4846 WatchSource:0}: Error finding container c5cbcf387ad968543ebab0392b5b292bf1d498a5d92cad7003dc6ca04eca4846: Status 404 returned error can't find the container with id c5cbcf387ad968543ebab0392b5b292bf1d498a5d92cad7003dc6ca04eca4846 Nov 21 19:22:56 crc kubenswrapper[4701]: I1121 19:22:56.238085 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3df472cf-6795-4ca8-908b-01f824bf4b5e","Type":"ContainerStarted","Data":"c5cbcf387ad968543ebab0392b5b292bf1d498a5d92cad7003dc6ca04eca4846"} Nov 21 19:22:56 crc kubenswrapper[4701]: I1121 19:22:56.555726 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 21 19:22:56 crc kubenswrapper[4701]: I1121 19:22:56.555834 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 21 19:22:57 crc kubenswrapper[4701]: I1121 19:22:57.258015 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3df472cf-6795-4ca8-908b-01f824bf4b5e","Type":"ContainerStarted","Data":"a8e6241b48467f327e0f8d881fc970506d671cc4f3ee94c708df5050097d69c3"} Nov 21 19:22:57 crc kubenswrapper[4701]: I1121 19:22:57.287611 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.287573658 podStartE2EDuration="2.287573658s" podCreationTimestamp="2025-11-21 19:22:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:22:57.28471193 +0000 UTC m=+1268.069852007" watchObservedRunningTime="2025-11-21 19:22:57.287573658 +0000 UTC m=+1268.072713685" Nov 21 19:23:00 crc kubenswrapper[4701]: I1121 19:23:00.620672 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 21 19:23:01 crc kubenswrapper[4701]: I1121 19:23:01.555669 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 21 19:23:01 crc kubenswrapper[4701]: I1121 19:23:01.556124 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 21 19:23:02 crc kubenswrapper[4701]: I1121 19:23:02.574596 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="3e2f4186-103f-4356-8b8a-80a07cde4ac4" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.219:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 21 19:23:02 crc kubenswrapper[4701]: I1121 19:23:02.574618 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="3e2f4186-103f-4356-8b8a-80a07cde4ac4" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.219:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 21 19:23:02 crc kubenswrapper[4701]: I1121 19:23:02.621429 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 21 19:23:02 crc kubenswrapper[4701]: I1121 19:23:02.621507 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 21 19:23:03 crc kubenswrapper[4701]: I1121 19:23:03.640503 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="7dfe7de3-4ade-4a2e-8826-be286e416d33" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.220:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 21 19:23:03 crc kubenswrapper[4701]: I1121 19:23:03.640617 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="7dfe7de3-4ade-4a2e-8826-be286e416d33" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.220:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 21 19:23:05 crc kubenswrapper[4701]: I1121 19:23:05.620309 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 21 19:23:05 crc kubenswrapper[4701]: I1121 19:23:05.664756 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 21 19:23:06 crc kubenswrapper[4701]: I1121 19:23:06.474855 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 21 19:23:08 crc kubenswrapper[4701]: I1121 19:23:08.338469 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 21 19:23:11 crc kubenswrapper[4701]: I1121 19:23:11.572802 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 21 19:23:11 crc kubenswrapper[4701]: I1121 19:23:11.576382 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 21 
19:23:11 crc kubenswrapper[4701]: I1121 19:23:11.583742 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 21 19:23:12 crc kubenswrapper[4701]: I1121 19:23:12.504098 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 21 19:23:12 crc kubenswrapper[4701]: I1121 19:23:12.640190 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 21 19:23:12 crc kubenswrapper[4701]: I1121 19:23:12.641312 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 21 19:23:12 crc kubenswrapper[4701]: I1121 19:23:12.642292 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 21 19:23:12 crc kubenswrapper[4701]: I1121 19:23:12.653896 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 21 19:23:12 crc kubenswrapper[4701]: I1121 19:23:12.736754 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 21 19:23:12 crc kubenswrapper[4701]: I1121 19:23:12.737050 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="600d7142-cf1a-4e30-968d-5b75a572085d" containerName="kube-state-metrics" containerID="cri-o://e12e7f26b209cd40d77e7a2634e9bcdba16d9c8de05d10331b6d2c353484cbab" gracePeriod=30 Nov 21 19:23:13 crc kubenswrapper[4701]: I1121 19:23:13.287168 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 21 19:23:13 crc kubenswrapper[4701]: I1121 19:23:13.432192 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jmr4d\" (UniqueName: \"kubernetes.io/projected/600d7142-cf1a-4e30-968d-5b75a572085d-kube-api-access-jmr4d\") pod \"600d7142-cf1a-4e30-968d-5b75a572085d\" (UID: \"600d7142-cf1a-4e30-968d-5b75a572085d\") " Nov 21 19:23:13 crc kubenswrapper[4701]: I1121 19:23:13.452457 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/600d7142-cf1a-4e30-968d-5b75a572085d-kube-api-access-jmr4d" (OuterVolumeSpecName: "kube-api-access-jmr4d") pod "600d7142-cf1a-4e30-968d-5b75a572085d" (UID: "600d7142-cf1a-4e30-968d-5b75a572085d"). InnerVolumeSpecName "kube-api-access-jmr4d". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:23:13 crc kubenswrapper[4701]: I1121 19:23:13.506028 4701 generic.go:334] "Generic (PLEG): container finished" podID="600d7142-cf1a-4e30-968d-5b75a572085d" containerID="e12e7f26b209cd40d77e7a2634e9bcdba16d9c8de05d10331b6d2c353484cbab" exitCode=2 Nov 21 19:23:13 crc kubenswrapper[4701]: I1121 19:23:13.506421 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"600d7142-cf1a-4e30-968d-5b75a572085d","Type":"ContainerDied","Data":"e12e7f26b209cd40d77e7a2634e9bcdba16d9c8de05d10331b6d2c353484cbab"} Nov 21 19:23:13 crc kubenswrapper[4701]: I1121 19:23:13.506466 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"600d7142-cf1a-4e30-968d-5b75a572085d","Type":"ContainerDied","Data":"335f58aa33577b685678dab6d24654a33e4f7acc29d44df0bce19f421dffa906"} Nov 21 19:23:13 crc kubenswrapper[4701]: I1121 19:23:13.506496 4701 scope.go:117] "RemoveContainer" containerID="e12e7f26b209cd40d77e7a2634e9bcdba16d9c8de05d10331b6d2c353484cbab" Nov 21 19:23:13 crc kubenswrapper[4701]: I1121 19:23:13.506701 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 21 19:23:13 crc kubenswrapper[4701]: I1121 19:23:13.507228 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 21 19:23:13 crc kubenswrapper[4701]: I1121 19:23:13.536345 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 21 19:23:13 crc kubenswrapper[4701]: I1121 19:23:13.540578 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jmr4d\" (UniqueName: \"kubernetes.io/projected/600d7142-cf1a-4e30-968d-5b75a572085d-kube-api-access-jmr4d\") on node \"crc\" DevicePath \"\"" Nov 21 19:23:13 crc kubenswrapper[4701]: I1121 19:23:13.540857 4701 scope.go:117] "RemoveContainer" containerID="e12e7f26b209cd40d77e7a2634e9bcdba16d9c8de05d10331b6d2c353484cbab" Nov 21 19:23:13 crc kubenswrapper[4701]: E1121 19:23:13.549549 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e12e7f26b209cd40d77e7a2634e9bcdba16d9c8de05d10331b6d2c353484cbab\": container with ID starting with e12e7f26b209cd40d77e7a2634e9bcdba16d9c8de05d10331b6d2c353484cbab not found: ID does not exist" containerID="e12e7f26b209cd40d77e7a2634e9bcdba16d9c8de05d10331b6d2c353484cbab" Nov 21 19:23:13 crc kubenswrapper[4701]: I1121 19:23:13.549606 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e12e7f26b209cd40d77e7a2634e9bcdba16d9c8de05d10331b6d2c353484cbab"} err="failed to get container status \"e12e7f26b209cd40d77e7a2634e9bcdba16d9c8de05d10331b6d2c353484cbab\": rpc error: code = NotFound desc = could not find container \"e12e7f26b209cd40d77e7a2634e9bcdba16d9c8de05d10331b6d2c353484cbab\": container with ID starting with e12e7f26b209cd40d77e7a2634e9bcdba16d9c8de05d10331b6d2c353484cbab not found: ID does not exist" Nov 21 19:23:13 crc kubenswrapper[4701]: I1121 19:23:13.566911 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 21 19:23:13 crc kubenswrapper[4701]: I1121 19:23:13.609569 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 21 19:23:13 crc kubenswrapper[4701]: I1121 19:23:13.629296 4701 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/kube-state-metrics-0"] Nov 21 19:23:13 crc kubenswrapper[4701]: E1121 19:23:13.629778 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="600d7142-cf1a-4e30-968d-5b75a572085d" containerName="kube-state-metrics" Nov 21 19:23:13 crc kubenswrapper[4701]: I1121 19:23:13.629798 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="600d7142-cf1a-4e30-968d-5b75a572085d" containerName="kube-state-metrics" Nov 21 19:23:13 crc kubenswrapper[4701]: I1121 19:23:13.630019 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="600d7142-cf1a-4e30-968d-5b75a572085d" containerName="kube-state-metrics" Nov 21 19:23:13 crc kubenswrapper[4701]: I1121 19:23:13.630758 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 21 19:23:13 crc kubenswrapper[4701]: I1121 19:23:13.635643 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 21 19:23:13 crc kubenswrapper[4701]: I1121 19:23:13.635915 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 21 19:23:13 crc kubenswrapper[4701]: I1121 19:23:13.658596 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 21 19:23:13 crc kubenswrapper[4701]: I1121 19:23:13.745159 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ebd86aa-d8dc-4f48-b9a4-c6445bdb71ad-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"2ebd86aa-d8dc-4f48-b9a4-c6445bdb71ad\") " pod="openstack/kube-state-metrics-0" Nov 21 19:23:13 crc kubenswrapper[4701]: I1121 19:23:13.745331 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2rssf\" (UniqueName: \"kubernetes.io/projected/2ebd86aa-d8dc-4f48-b9a4-c6445bdb71ad-kube-api-access-2rssf\") pod \"kube-state-metrics-0\" (UID: \"2ebd86aa-d8dc-4f48-b9a4-c6445bdb71ad\") " pod="openstack/kube-state-metrics-0" Nov 21 19:23:13 crc kubenswrapper[4701]: I1121 19:23:13.745363 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/2ebd86aa-d8dc-4f48-b9a4-c6445bdb71ad-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"2ebd86aa-d8dc-4f48-b9a4-c6445bdb71ad\") " pod="openstack/kube-state-metrics-0" Nov 21 19:23:13 crc kubenswrapper[4701]: I1121 19:23:13.745392 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ebd86aa-d8dc-4f48-b9a4-c6445bdb71ad-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"2ebd86aa-d8dc-4f48-b9a4-c6445bdb71ad\") " pod="openstack/kube-state-metrics-0" Nov 21 19:23:13 crc kubenswrapper[4701]: I1121 19:23:13.847111 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ebd86aa-d8dc-4f48-b9a4-c6445bdb71ad-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"2ebd86aa-d8dc-4f48-b9a4-c6445bdb71ad\") " pod="openstack/kube-state-metrics-0" Nov 21 19:23:13 crc kubenswrapper[4701]: I1121 19:23:13.847259 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2rssf\" (UniqueName: 
\"kubernetes.io/projected/2ebd86aa-d8dc-4f48-b9a4-c6445bdb71ad-kube-api-access-2rssf\") pod \"kube-state-metrics-0\" (UID: \"2ebd86aa-d8dc-4f48-b9a4-c6445bdb71ad\") " pod="openstack/kube-state-metrics-0" Nov 21 19:23:13 crc kubenswrapper[4701]: I1121 19:23:13.847281 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/2ebd86aa-d8dc-4f48-b9a4-c6445bdb71ad-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"2ebd86aa-d8dc-4f48-b9a4-c6445bdb71ad\") " pod="openstack/kube-state-metrics-0" Nov 21 19:23:13 crc kubenswrapper[4701]: I1121 19:23:13.847310 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ebd86aa-d8dc-4f48-b9a4-c6445bdb71ad-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"2ebd86aa-d8dc-4f48-b9a4-c6445bdb71ad\") " pod="openstack/kube-state-metrics-0" Nov 21 19:23:13 crc kubenswrapper[4701]: I1121 19:23:13.872925 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ebd86aa-d8dc-4f48-b9a4-c6445bdb71ad-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"2ebd86aa-d8dc-4f48-b9a4-c6445bdb71ad\") " pod="openstack/kube-state-metrics-0" Nov 21 19:23:13 crc kubenswrapper[4701]: I1121 19:23:13.873748 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/2ebd86aa-d8dc-4f48-b9a4-c6445bdb71ad-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"2ebd86aa-d8dc-4f48-b9a4-c6445bdb71ad\") " pod="openstack/kube-state-metrics-0" Nov 21 19:23:13 crc kubenswrapper[4701]: I1121 19:23:13.877782 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ebd86aa-d8dc-4f48-b9a4-c6445bdb71ad-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"2ebd86aa-d8dc-4f48-b9a4-c6445bdb71ad\") " pod="openstack/kube-state-metrics-0" Nov 21 19:23:13 crc kubenswrapper[4701]: I1121 19:23:13.878813 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2rssf\" (UniqueName: \"kubernetes.io/projected/2ebd86aa-d8dc-4f48-b9a4-c6445bdb71ad-kube-api-access-2rssf\") pod \"kube-state-metrics-0\" (UID: \"2ebd86aa-d8dc-4f48-b9a4-c6445bdb71ad\") " pod="openstack/kube-state-metrics-0" Nov 21 19:23:13 crc kubenswrapper[4701]: I1121 19:23:13.962047 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 21 19:23:14 crc kubenswrapper[4701]: I1121 19:23:14.027738 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="600d7142-cf1a-4e30-968d-5b75a572085d" path="/var/lib/kubelet/pods/600d7142-cf1a-4e30-968d-5b75a572085d/volumes" Nov 21 19:23:14 crc kubenswrapper[4701]: W1121 19:23:14.559423 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2ebd86aa_d8dc_4f48_b9a4_c6445bdb71ad.slice/crio-b0415236981adf3d34fedfc9e200e4574096703620776217c3971ced23374dc8 WatchSource:0}: Error finding container b0415236981adf3d34fedfc9e200e4574096703620776217c3971ced23374dc8: Status 404 returned error can't find the container with id b0415236981adf3d34fedfc9e200e4574096703620776217c3971ced23374dc8 Nov 21 19:23:14 crc kubenswrapper[4701]: I1121 19:23:14.570213 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 21 19:23:15 crc kubenswrapper[4701]: I1121 19:23:15.266084 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:23:15 crc kubenswrapper[4701]: I1121 19:23:15.266960 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="89bc2158-a924-4e24-8c88-bc981495b7ab" containerName="ceilometer-central-agent" containerID="cri-o://c9bab56b23fb1f711d3f4b1613ab69df67e5a52c1105f3c8fdf47fdff783d4ca" gracePeriod=30 Nov 21 19:23:15 crc kubenswrapper[4701]: I1121 19:23:15.267139 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="89bc2158-a924-4e24-8c88-bc981495b7ab" containerName="sg-core" containerID="cri-o://8697d0f11f914089c6efb2568a7b77f2657978f861a6352963c2aa740c30bae6" gracePeriod=30 Nov 21 19:23:15 crc kubenswrapper[4701]: I1121 19:23:15.267090 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="89bc2158-a924-4e24-8c88-bc981495b7ab" containerName="proxy-httpd" containerID="cri-o://1d23d1e0a85c550c1fd905d46e8f8811f9eff8efef226ed9a1f01cacce70ccd9" gracePeriod=30 Nov 21 19:23:15 crc kubenswrapper[4701]: I1121 19:23:15.267217 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="89bc2158-a924-4e24-8c88-bc981495b7ab" containerName="ceilometer-notification-agent" containerID="cri-o://3b6c88637788a44438b2cd61aa5ed3b5099c0b16574186d2d1937bb0a9fd5cb5" gracePeriod=30 Nov 21 19:23:15 crc kubenswrapper[4701]: I1121 19:23:15.538289 4701 generic.go:334] "Generic (PLEG): container finished" podID="89bc2158-a924-4e24-8c88-bc981495b7ab" containerID="1d23d1e0a85c550c1fd905d46e8f8811f9eff8efef226ed9a1f01cacce70ccd9" exitCode=0 Nov 21 19:23:15 crc kubenswrapper[4701]: I1121 19:23:15.538719 4701 generic.go:334] "Generic (PLEG): container finished" podID="89bc2158-a924-4e24-8c88-bc981495b7ab" containerID="8697d0f11f914089c6efb2568a7b77f2657978f861a6352963c2aa740c30bae6" exitCode=2 Nov 21 19:23:15 crc kubenswrapper[4701]: I1121 19:23:15.538361 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"89bc2158-a924-4e24-8c88-bc981495b7ab","Type":"ContainerDied","Data":"1d23d1e0a85c550c1fd905d46e8f8811f9eff8efef226ed9a1f01cacce70ccd9"} Nov 21 19:23:15 crc kubenswrapper[4701]: I1121 19:23:15.538823 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"89bc2158-a924-4e24-8c88-bc981495b7ab","Type":"ContainerDied","Data":"8697d0f11f914089c6efb2568a7b77f2657978f861a6352963c2aa740c30bae6"} Nov 21 19:23:15 crc kubenswrapper[4701]: I1121 19:23:15.541630 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"2ebd86aa-d8dc-4f48-b9a4-c6445bdb71ad","Type":"ContainerStarted","Data":"66c8f389762b3871ea73b2a8af69bcb778d1a7f77ab36aa13d8c5a378495ab57"} Nov 21 19:23:15 crc kubenswrapper[4701]: I1121 19:23:15.541663 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"2ebd86aa-d8dc-4f48-b9a4-c6445bdb71ad","Type":"ContainerStarted","Data":"b0415236981adf3d34fedfc9e200e4574096703620776217c3971ced23374dc8"} Nov 21 19:23:15 crc kubenswrapper[4701]: I1121 19:23:15.564259 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.18147928 podStartE2EDuration="2.564227431s" podCreationTimestamp="2025-11-21 19:23:13 +0000 UTC" firstStartedPulling="2025-11-21 19:23:14.563108458 +0000 UTC m=+1285.348248495" lastFinishedPulling="2025-11-21 19:23:14.945856629 +0000 UTC m=+1285.730996646" observedRunningTime="2025-11-21 19:23:15.561096226 +0000 UTC m=+1286.346236253" watchObservedRunningTime="2025-11-21 19:23:15.564227431 +0000 UTC m=+1286.349367458" Nov 21 19:23:16 crc kubenswrapper[4701]: I1121 19:23:16.568903 4701 generic.go:334] "Generic (PLEG): container finished" podID="89bc2158-a924-4e24-8c88-bc981495b7ab" containerID="c9bab56b23fb1f711d3f4b1613ab69df67e5a52c1105f3c8fdf47fdff783d4ca" exitCode=0 Nov 21 19:23:16 crc kubenswrapper[4701]: I1121 19:23:16.568993 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"89bc2158-a924-4e24-8c88-bc981495b7ab","Type":"ContainerDied","Data":"c9bab56b23fb1f711d3f4b1613ab69df67e5a52c1105f3c8fdf47fdff783d4ca"} Nov 21 19:23:16 crc kubenswrapper[4701]: I1121 19:23:16.570147 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.121132 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.276607 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89bc2158-a924-4e24-8c88-bc981495b7ab-config-data\") pod \"89bc2158-a924-4e24-8c88-bc981495b7ab\" (UID: \"89bc2158-a924-4e24-8c88-bc981495b7ab\") " Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.276752 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/89bc2158-a924-4e24-8c88-bc981495b7ab-run-httpd\") pod \"89bc2158-a924-4e24-8c88-bc981495b7ab\" (UID: \"89bc2158-a924-4e24-8c88-bc981495b7ab\") " Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.276846 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5gj6q\" (UniqueName: \"kubernetes.io/projected/89bc2158-a924-4e24-8c88-bc981495b7ab-kube-api-access-5gj6q\") pod \"89bc2158-a924-4e24-8c88-bc981495b7ab\" (UID: \"89bc2158-a924-4e24-8c88-bc981495b7ab\") " Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.276999 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/89bc2158-a924-4e24-8c88-bc981495b7ab-sg-core-conf-yaml\") pod \"89bc2158-a924-4e24-8c88-bc981495b7ab\" (UID: \"89bc2158-a924-4e24-8c88-bc981495b7ab\") " Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.277167 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89bc2158-a924-4e24-8c88-bc981495b7ab-combined-ca-bundle\") pod \"89bc2158-a924-4e24-8c88-bc981495b7ab\" (UID: \"89bc2158-a924-4e24-8c88-bc981495b7ab\") " Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.277283 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/89bc2158-a924-4e24-8c88-bc981495b7ab-log-httpd\") pod \"89bc2158-a924-4e24-8c88-bc981495b7ab\" (UID: \"89bc2158-a924-4e24-8c88-bc981495b7ab\") " Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.277370 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89bc2158-a924-4e24-8c88-bc981495b7ab-scripts\") pod \"89bc2158-a924-4e24-8c88-bc981495b7ab\" (UID: \"89bc2158-a924-4e24-8c88-bc981495b7ab\") " Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.277569 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/89bc2158-a924-4e24-8c88-bc981495b7ab-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "89bc2158-a924-4e24-8c88-bc981495b7ab" (UID: "89bc2158-a924-4e24-8c88-bc981495b7ab"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.278062 4701 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/89bc2158-a924-4e24-8c88-bc981495b7ab-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.278977 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/89bc2158-a924-4e24-8c88-bc981495b7ab-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "89bc2158-a924-4e24-8c88-bc981495b7ab" (UID: "89bc2158-a924-4e24-8c88-bc981495b7ab"). 
InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.284524 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/89bc2158-a924-4e24-8c88-bc981495b7ab-kube-api-access-5gj6q" (OuterVolumeSpecName: "kube-api-access-5gj6q") pod "89bc2158-a924-4e24-8c88-bc981495b7ab" (UID: "89bc2158-a924-4e24-8c88-bc981495b7ab"). InnerVolumeSpecName "kube-api-access-5gj6q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.285392 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89bc2158-a924-4e24-8c88-bc981495b7ab-scripts" (OuterVolumeSpecName: "scripts") pod "89bc2158-a924-4e24-8c88-bc981495b7ab" (UID: "89bc2158-a924-4e24-8c88-bc981495b7ab"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.309854 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89bc2158-a924-4e24-8c88-bc981495b7ab-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "89bc2158-a924-4e24-8c88-bc981495b7ab" (UID: "89bc2158-a924-4e24-8c88-bc981495b7ab"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.380758 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5gj6q\" (UniqueName: \"kubernetes.io/projected/89bc2158-a924-4e24-8c88-bc981495b7ab-kube-api-access-5gj6q\") on node \"crc\" DevicePath \"\"" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.380943 4701 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/89bc2158-a924-4e24-8c88-bc981495b7ab-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.381021 4701 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/89bc2158-a924-4e24-8c88-bc981495b7ab-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.381087 4701 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89bc2158-a924-4e24-8c88-bc981495b7ab-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.384168 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89bc2158-a924-4e24-8c88-bc981495b7ab-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "89bc2158-a924-4e24-8c88-bc981495b7ab" (UID: "89bc2158-a924-4e24-8c88-bc981495b7ab"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.402919 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89bc2158-a924-4e24-8c88-bc981495b7ab-config-data" (OuterVolumeSpecName: "config-data") pod "89bc2158-a924-4e24-8c88-bc981495b7ab" (UID: "89bc2158-a924-4e24-8c88-bc981495b7ab"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.487807 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89bc2158-a924-4e24-8c88-bc981495b7ab-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.487860 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89bc2158-a924-4e24-8c88-bc981495b7ab-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.650974 4701 generic.go:334] "Generic (PLEG): container finished" podID="89bc2158-a924-4e24-8c88-bc981495b7ab" containerID="3b6c88637788a44438b2cd61aa5ed3b5099c0b16574186d2d1937bb0a9fd5cb5" exitCode=0 Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.651025 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"89bc2158-a924-4e24-8c88-bc981495b7ab","Type":"ContainerDied","Data":"3b6c88637788a44438b2cd61aa5ed3b5099c0b16574186d2d1937bb0a9fd5cb5"} Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.651055 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"89bc2158-a924-4e24-8c88-bc981495b7ab","Type":"ContainerDied","Data":"09918835caba4284fb3b5d69b238b462d9a71b1697719df9479229a9a867519b"} Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.651056 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.651093 4701 scope.go:117] "RemoveContainer" containerID="1d23d1e0a85c550c1fd905d46e8f8811f9eff8efef226ed9a1f01cacce70ccd9" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.701462 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.726172 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.739729 4701 scope.go:117] "RemoveContainer" containerID="8697d0f11f914089c6efb2568a7b77f2657978f861a6352963c2aa740c30bae6" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.763037 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:23:21 crc kubenswrapper[4701]: E1121 19:23:21.764048 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89bc2158-a924-4e24-8c88-bc981495b7ab" containerName="ceilometer-notification-agent" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.764073 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="89bc2158-a924-4e24-8c88-bc981495b7ab" containerName="ceilometer-notification-agent" Nov 21 19:23:21 crc kubenswrapper[4701]: E1121 19:23:21.764108 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89bc2158-a924-4e24-8c88-bc981495b7ab" containerName="sg-core" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.764114 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="89bc2158-a924-4e24-8c88-bc981495b7ab" containerName="sg-core" Nov 21 19:23:21 crc kubenswrapper[4701]: E1121 19:23:21.764151 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89bc2158-a924-4e24-8c88-bc981495b7ab" containerName="proxy-httpd" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.764158 4701 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="89bc2158-a924-4e24-8c88-bc981495b7ab" containerName="proxy-httpd" Nov 21 19:23:21 crc kubenswrapper[4701]: E1121 19:23:21.764172 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89bc2158-a924-4e24-8c88-bc981495b7ab" containerName="ceilometer-central-agent" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.764179 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="89bc2158-a924-4e24-8c88-bc981495b7ab" containerName="ceilometer-central-agent" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.764573 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="89bc2158-a924-4e24-8c88-bc981495b7ab" containerName="ceilometer-notification-agent" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.764632 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="89bc2158-a924-4e24-8c88-bc981495b7ab" containerName="sg-core" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.764650 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="89bc2158-a924-4e24-8c88-bc981495b7ab" containerName="ceilometer-central-agent" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.764678 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="89bc2158-a924-4e24-8c88-bc981495b7ab" containerName="proxy-httpd" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.769023 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.772531 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.777168 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.777954 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.795730 4701 scope.go:117] "RemoveContainer" containerID="3b6c88637788a44438b2cd61aa5ed3b5099c0b16574186d2d1937bb0a9fd5cb5" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.804550 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.819101 4701 scope.go:117] "RemoveContainer" containerID="c9bab56b23fb1f711d3f4b1613ab69df67e5a52c1105f3c8fdf47fdff783d4ca" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.840826 4701 scope.go:117] "RemoveContainer" containerID="1d23d1e0a85c550c1fd905d46e8f8811f9eff8efef226ed9a1f01cacce70ccd9" Nov 21 19:23:21 crc kubenswrapper[4701]: E1121 19:23:21.841277 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1d23d1e0a85c550c1fd905d46e8f8811f9eff8efef226ed9a1f01cacce70ccd9\": container with ID starting with 1d23d1e0a85c550c1fd905d46e8f8811f9eff8efef226ed9a1f01cacce70ccd9 not found: ID does not exist" containerID="1d23d1e0a85c550c1fd905d46e8f8811f9eff8efef226ed9a1f01cacce70ccd9" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.841308 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1d23d1e0a85c550c1fd905d46e8f8811f9eff8efef226ed9a1f01cacce70ccd9"} err="failed to get container status \"1d23d1e0a85c550c1fd905d46e8f8811f9eff8efef226ed9a1f01cacce70ccd9\": rpc error: code = NotFound desc = could not find container 
\"1d23d1e0a85c550c1fd905d46e8f8811f9eff8efef226ed9a1f01cacce70ccd9\": container with ID starting with 1d23d1e0a85c550c1fd905d46e8f8811f9eff8efef226ed9a1f01cacce70ccd9 not found: ID does not exist" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.841330 4701 scope.go:117] "RemoveContainer" containerID="8697d0f11f914089c6efb2568a7b77f2657978f861a6352963c2aa740c30bae6" Nov 21 19:23:21 crc kubenswrapper[4701]: E1121 19:23:21.841665 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8697d0f11f914089c6efb2568a7b77f2657978f861a6352963c2aa740c30bae6\": container with ID starting with 8697d0f11f914089c6efb2568a7b77f2657978f861a6352963c2aa740c30bae6 not found: ID does not exist" containerID="8697d0f11f914089c6efb2568a7b77f2657978f861a6352963c2aa740c30bae6" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.841682 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8697d0f11f914089c6efb2568a7b77f2657978f861a6352963c2aa740c30bae6"} err="failed to get container status \"8697d0f11f914089c6efb2568a7b77f2657978f861a6352963c2aa740c30bae6\": rpc error: code = NotFound desc = could not find container \"8697d0f11f914089c6efb2568a7b77f2657978f861a6352963c2aa740c30bae6\": container with ID starting with 8697d0f11f914089c6efb2568a7b77f2657978f861a6352963c2aa740c30bae6 not found: ID does not exist" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.841732 4701 scope.go:117] "RemoveContainer" containerID="3b6c88637788a44438b2cd61aa5ed3b5099c0b16574186d2d1937bb0a9fd5cb5" Nov 21 19:23:21 crc kubenswrapper[4701]: E1121 19:23:21.842351 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3b6c88637788a44438b2cd61aa5ed3b5099c0b16574186d2d1937bb0a9fd5cb5\": container with ID starting with 3b6c88637788a44438b2cd61aa5ed3b5099c0b16574186d2d1937bb0a9fd5cb5 not found: ID does not exist" containerID="3b6c88637788a44438b2cd61aa5ed3b5099c0b16574186d2d1937bb0a9fd5cb5" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.842413 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3b6c88637788a44438b2cd61aa5ed3b5099c0b16574186d2d1937bb0a9fd5cb5"} err="failed to get container status \"3b6c88637788a44438b2cd61aa5ed3b5099c0b16574186d2d1937bb0a9fd5cb5\": rpc error: code = NotFound desc = could not find container \"3b6c88637788a44438b2cd61aa5ed3b5099c0b16574186d2d1937bb0a9fd5cb5\": container with ID starting with 3b6c88637788a44438b2cd61aa5ed3b5099c0b16574186d2d1937bb0a9fd5cb5 not found: ID does not exist" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.842445 4701 scope.go:117] "RemoveContainer" containerID="c9bab56b23fb1f711d3f4b1613ab69df67e5a52c1105f3c8fdf47fdff783d4ca" Nov 21 19:23:21 crc kubenswrapper[4701]: E1121 19:23:21.842755 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c9bab56b23fb1f711d3f4b1613ab69df67e5a52c1105f3c8fdf47fdff783d4ca\": container with ID starting with c9bab56b23fb1f711d3f4b1613ab69df67e5a52c1105f3c8fdf47fdff783d4ca not found: ID does not exist" containerID="c9bab56b23fb1f711d3f4b1613ab69df67e5a52c1105f3c8fdf47fdff783d4ca" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.842785 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9bab56b23fb1f711d3f4b1613ab69df67e5a52c1105f3c8fdf47fdff783d4ca"} 
err="failed to get container status \"c9bab56b23fb1f711d3f4b1613ab69df67e5a52c1105f3c8fdf47fdff783d4ca\": rpc error: code = NotFound desc = could not find container \"c9bab56b23fb1f711d3f4b1613ab69df67e5a52c1105f3c8fdf47fdff783d4ca\": container with ID starting with c9bab56b23fb1f711d3f4b1613ab69df67e5a52c1105f3c8fdf47fdff783d4ca not found: ID does not exist" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.896331 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7446e023-4eae-4738-ab4a-4ddf024cd980-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"7446e023-4eae-4738-ab4a-4ddf024cd980\") " pod="openstack/ceilometer-0" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.896469 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7446e023-4eae-4738-ab4a-4ddf024cd980-config-data\") pod \"ceilometer-0\" (UID: \"7446e023-4eae-4738-ab4a-4ddf024cd980\") " pod="openstack/ceilometer-0" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.896645 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v6k6r\" (UniqueName: \"kubernetes.io/projected/7446e023-4eae-4738-ab4a-4ddf024cd980-kube-api-access-v6k6r\") pod \"ceilometer-0\" (UID: \"7446e023-4eae-4738-ab4a-4ddf024cd980\") " pod="openstack/ceilometer-0" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.896721 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7446e023-4eae-4738-ab4a-4ddf024cd980-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7446e023-4eae-4738-ab4a-4ddf024cd980\") " pod="openstack/ceilometer-0" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.896777 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7446e023-4eae-4738-ab4a-4ddf024cd980-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7446e023-4eae-4738-ab4a-4ddf024cd980\") " pod="openstack/ceilometer-0" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.896823 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7446e023-4eae-4738-ab4a-4ddf024cd980-scripts\") pod \"ceilometer-0\" (UID: \"7446e023-4eae-4738-ab4a-4ddf024cd980\") " pod="openstack/ceilometer-0" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.896951 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7446e023-4eae-4738-ab4a-4ddf024cd980-log-httpd\") pod \"ceilometer-0\" (UID: \"7446e023-4eae-4738-ab4a-4ddf024cd980\") " pod="openstack/ceilometer-0" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.897068 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7446e023-4eae-4738-ab4a-4ddf024cd980-run-httpd\") pod \"ceilometer-0\" (UID: \"7446e023-4eae-4738-ab4a-4ddf024cd980\") " pod="openstack/ceilometer-0" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.964472 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="89bc2158-a924-4e24-8c88-bc981495b7ab" 
path="/var/lib/kubelet/pods/89bc2158-a924-4e24-8c88-bc981495b7ab/volumes" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.999273 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7446e023-4eae-4738-ab4a-4ddf024cd980-run-httpd\") pod \"ceilometer-0\" (UID: \"7446e023-4eae-4738-ab4a-4ddf024cd980\") " pod="openstack/ceilometer-0" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.999335 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7446e023-4eae-4738-ab4a-4ddf024cd980-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"7446e023-4eae-4738-ab4a-4ddf024cd980\") " pod="openstack/ceilometer-0" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.999448 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7446e023-4eae-4738-ab4a-4ddf024cd980-config-data\") pod \"ceilometer-0\" (UID: \"7446e023-4eae-4738-ab4a-4ddf024cd980\") " pod="openstack/ceilometer-0" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.999502 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v6k6r\" (UniqueName: \"kubernetes.io/projected/7446e023-4eae-4738-ab4a-4ddf024cd980-kube-api-access-v6k6r\") pod \"ceilometer-0\" (UID: \"7446e023-4eae-4738-ab4a-4ddf024cd980\") " pod="openstack/ceilometer-0" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.999540 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7446e023-4eae-4738-ab4a-4ddf024cd980-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7446e023-4eae-4738-ab4a-4ddf024cd980\") " pod="openstack/ceilometer-0" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.999567 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7446e023-4eae-4738-ab4a-4ddf024cd980-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7446e023-4eae-4738-ab4a-4ddf024cd980\") " pod="openstack/ceilometer-0" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.999595 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7446e023-4eae-4738-ab4a-4ddf024cd980-scripts\") pod \"ceilometer-0\" (UID: \"7446e023-4eae-4738-ab4a-4ddf024cd980\") " pod="openstack/ceilometer-0" Nov 21 19:23:21 crc kubenswrapper[4701]: I1121 19:23:21.999648 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7446e023-4eae-4738-ab4a-4ddf024cd980-log-httpd\") pod \"ceilometer-0\" (UID: \"7446e023-4eae-4738-ab4a-4ddf024cd980\") " pod="openstack/ceilometer-0" Nov 21 19:23:22 crc kubenswrapper[4701]: I1121 19:23:22.000050 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7446e023-4eae-4738-ab4a-4ddf024cd980-run-httpd\") pod \"ceilometer-0\" (UID: \"7446e023-4eae-4738-ab4a-4ddf024cd980\") " pod="openstack/ceilometer-0" Nov 21 19:23:22 crc kubenswrapper[4701]: I1121 19:23:22.000171 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7446e023-4eae-4738-ab4a-4ddf024cd980-log-httpd\") pod \"ceilometer-0\" (UID: 
\"7446e023-4eae-4738-ab4a-4ddf024cd980\") " pod="openstack/ceilometer-0" Nov 21 19:23:22 crc kubenswrapper[4701]: I1121 19:23:22.004802 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7446e023-4eae-4738-ab4a-4ddf024cd980-config-data\") pod \"ceilometer-0\" (UID: \"7446e023-4eae-4738-ab4a-4ddf024cd980\") " pod="openstack/ceilometer-0" Nov 21 19:23:22 crc kubenswrapper[4701]: I1121 19:23:22.005051 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7446e023-4eae-4738-ab4a-4ddf024cd980-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"7446e023-4eae-4738-ab4a-4ddf024cd980\") " pod="openstack/ceilometer-0" Nov 21 19:23:22 crc kubenswrapper[4701]: I1121 19:23:22.005831 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7446e023-4eae-4738-ab4a-4ddf024cd980-scripts\") pod \"ceilometer-0\" (UID: \"7446e023-4eae-4738-ab4a-4ddf024cd980\") " pod="openstack/ceilometer-0" Nov 21 19:23:22 crc kubenswrapper[4701]: I1121 19:23:22.006114 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7446e023-4eae-4738-ab4a-4ddf024cd980-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7446e023-4eae-4738-ab4a-4ddf024cd980\") " pod="openstack/ceilometer-0" Nov 21 19:23:22 crc kubenswrapper[4701]: I1121 19:23:22.035714 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v6k6r\" (UniqueName: \"kubernetes.io/projected/7446e023-4eae-4738-ab4a-4ddf024cd980-kube-api-access-v6k6r\") pod \"ceilometer-0\" (UID: \"7446e023-4eae-4738-ab4a-4ddf024cd980\") " pod="openstack/ceilometer-0" Nov 21 19:23:22 crc kubenswrapper[4701]: I1121 19:23:22.041951 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7446e023-4eae-4738-ab4a-4ddf024cd980-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7446e023-4eae-4738-ab4a-4ddf024cd980\") " pod="openstack/ceilometer-0" Nov 21 19:23:22 crc kubenswrapper[4701]: I1121 19:23:22.100612 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 21 19:23:22 crc kubenswrapper[4701]: I1121 19:23:22.399037 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 19:23:22 crc kubenswrapper[4701]: I1121 19:23:22.668256 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7446e023-4eae-4738-ab4a-4ddf024cd980","Type":"ContainerStarted","Data":"2f250ac1152d7a5ddb61e48ca34596acac1029d7528265e33d49dc51274d4547"} Nov 21 19:23:23 crc kubenswrapper[4701]: I1121 19:23:23.698954 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7446e023-4eae-4738-ab4a-4ddf024cd980","Type":"ContainerStarted","Data":"e5c7a902c8aaf9bd5961e1ee2c49f8463d16253118b4548415abeba0f94395c9"} Nov 21 19:23:23 crc kubenswrapper[4701]: I1121 19:23:23.699879 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7446e023-4eae-4738-ab4a-4ddf024cd980","Type":"ContainerStarted","Data":"e259e67f7599b518ff3fa57791097ac4f8adc16aed5648ef0fafefe2b3e4c155"} Nov 21 19:23:23 crc kubenswrapper[4701]: I1121 19:23:23.995192 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 21 19:23:24 crc kubenswrapper[4701]: I1121 19:23:24.767513 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7446e023-4eae-4738-ab4a-4ddf024cd980","Type":"ContainerStarted","Data":"786af3a32271eedeef7a6438f5edf902dd49f65915e6c78a3af199ed2ea2be0f"} Nov 21 19:23:25 crc kubenswrapper[4701]: I1121 19:23:25.788116 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7446e023-4eae-4738-ab4a-4ddf024cd980","Type":"ContainerStarted","Data":"c16ba4ce0418c88f4ed8b537cd402212c18d3133d8f56bd17fffd38aefa656d2"} Nov 21 19:23:25 crc kubenswrapper[4701]: I1121 19:23:25.788801 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 21 19:23:25 crc kubenswrapper[4701]: I1121 19:23:25.859848 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.209018885 podStartE2EDuration="4.859802829s" podCreationTimestamp="2025-11-21 19:23:21 +0000 UTC" firstStartedPulling="2025-11-21 19:23:22.422853732 +0000 UTC m=+1293.207993759" lastFinishedPulling="2025-11-21 19:23:25.073637676 +0000 UTC m=+1295.858777703" observedRunningTime="2025-11-21 19:23:25.837952426 +0000 UTC m=+1296.623092453" watchObservedRunningTime="2025-11-21 19:23:25.859802829 +0000 UTC m=+1296.644942856" Nov 21 19:23:48 crc kubenswrapper[4701]: I1121 19:23:48.614007 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 19:23:48 crc kubenswrapper[4701]: I1121 19:23:48.615226 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 19:23:52 crc kubenswrapper[4701]: I1121 19:23:52.115004 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" 
Nov 21 19:24:02 crc kubenswrapper[4701]: I1121 19:24:02.355940 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 21 19:24:04 crc kubenswrapper[4701]: I1121 19:24:04.355269 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 21 19:24:05 crc kubenswrapper[4701]: I1121 19:24:05.174350 4701 scope.go:117] "RemoveContainer" containerID="23e8c445997290df8ede75813727bb744fb7ba2bcfc095ffec588084793fd21f" Nov 21 19:24:05 crc kubenswrapper[4701]: I1121 19:24:05.220549 4701 scope.go:117] "RemoveContainer" containerID="c716d8fd3a3a958d6a50f56edb0496829536007c47813bf18af12a842b48ba89" Nov 21 19:24:05 crc kubenswrapper[4701]: I1121 19:24:05.949539 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3" containerName="rabbitmq" containerID="cri-o://36ec982cd779dffc1e34f70f4637cd70db0da4c97841b60626b44a49cb751f98" gracePeriod=604797 Nov 21 19:24:07 crc kubenswrapper[4701]: I1121 19:24:07.349512 4701 generic.go:334] "Generic (PLEG): container finished" podID="34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3" containerID="36ec982cd779dffc1e34f70f4637cd70db0da4c97841b60626b44a49cb751f98" exitCode=0 Nov 21 19:24:07 crc kubenswrapper[4701]: I1121 19:24:07.349598 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3","Type":"ContainerDied","Data":"36ec982cd779dffc1e34f70f4637cd70db0da4c97841b60626b44a49cb751f98"} Nov 21 19:24:07 crc kubenswrapper[4701]: I1121 19:24:07.701068 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 21 19:24:07 crc kubenswrapper[4701]: I1121 19:24:07.816590 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-pod-info\") pod \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " Nov 21 19:24:07 crc kubenswrapper[4701]: I1121 19:24:07.816688 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-erlang-cookie-secret\") pod \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " Nov 21 19:24:07 crc kubenswrapper[4701]: I1121 19:24:07.816745 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-rabbitmq-plugins\") pod \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " Nov 21 19:24:07 crc kubenswrapper[4701]: I1121 19:24:07.816808 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-server-conf\") pod \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " Nov 21 19:24:07 crc kubenswrapper[4701]: I1121 19:24:07.816836 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-plugins-conf\") pod \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " Nov 21 19:24:07 crc 
kubenswrapper[4701]: I1121 19:24:07.816884 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-config-data\") pod \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " Nov 21 19:24:07 crc kubenswrapper[4701]: I1121 19:24:07.816911 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-rabbitmq-confd\") pod \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " Nov 21 19:24:07 crc kubenswrapper[4701]: I1121 19:24:07.817079 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-82vq4\" (UniqueName: \"kubernetes.io/projected/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-kube-api-access-82vq4\") pod \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " Nov 21 19:24:07 crc kubenswrapper[4701]: I1121 19:24:07.817119 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " Nov 21 19:24:07 crc kubenswrapper[4701]: I1121 19:24:07.817218 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-rabbitmq-erlang-cookie\") pod \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " Nov 21 19:24:07 crc kubenswrapper[4701]: I1121 19:24:07.817278 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-rabbitmq-tls\") pod \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\" (UID: \"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3\") " Nov 21 19:24:07 crc kubenswrapper[4701]: I1121 19:24:07.820986 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3" (UID: "34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:24:07 crc kubenswrapper[4701]: I1121 19:24:07.821128 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3" (UID: "34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:24:07 crc kubenswrapper[4701]: I1121 19:24:07.824243 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3" (UID: "34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3"). InnerVolumeSpecName "plugins-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:24:07 crc kubenswrapper[4701]: I1121 19:24:07.830692 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-pod-info" (OuterVolumeSpecName: "pod-info") pod "34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3" (UID: "34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 21 19:24:07 crc kubenswrapper[4701]: I1121 19:24:07.833372 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3" (UID: "34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:24:07 crc kubenswrapper[4701]: I1121 19:24:07.833461 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-kube-api-access-82vq4" (OuterVolumeSpecName: "kube-api-access-82vq4") pod "34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3" (UID: "34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3"). InnerVolumeSpecName "kube-api-access-82vq4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:24:07 crc kubenswrapper[4701]: I1121 19:24:07.838263 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "persistence") pod "34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3" (UID: "34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 21 19:24:07 crc kubenswrapper[4701]: I1121 19:24:07.839990 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3" (UID: "34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:24:07 crc kubenswrapper[4701]: I1121 19:24:07.879397 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-config-data" (OuterVolumeSpecName: "config-data") pod "34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3" (UID: "34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:24:07 crc kubenswrapper[4701]: I1121 19:24:07.920593 4701 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 21 19:24:07 crc kubenswrapper[4701]: I1121 19:24:07.920648 4701 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 21 19:24:07 crc kubenswrapper[4701]: I1121 19:24:07.920658 4701 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 21 19:24:07 crc kubenswrapper[4701]: I1121 19:24:07.920670 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:24:07 crc kubenswrapper[4701]: I1121 19:24:07.920680 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-82vq4\" (UniqueName: \"kubernetes.io/projected/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-kube-api-access-82vq4\") on node \"crc\" DevicePath \"\"" Nov 21 19:24:07 crc kubenswrapper[4701]: I1121 19:24:07.920707 4701 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Nov 21 19:24:07 crc kubenswrapper[4701]: I1121 19:24:07.920716 4701 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 21 19:24:07 crc kubenswrapper[4701]: I1121 19:24:07.920724 4701 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 21 19:24:07 crc kubenswrapper[4701]: I1121 19:24:07.920732 4701 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-pod-info\") on node \"crc\" DevicePath \"\"" Nov 21 19:24:07 crc kubenswrapper[4701]: I1121 19:24:07.950400 4701 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Nov 21 19:24:07 crc kubenswrapper[4701]: I1121 19:24:07.962382 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-server-conf" (OuterVolumeSpecName: "server-conf") pod "34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3" (UID: "34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:24:07 crc kubenswrapper[4701]: I1121 19:24:07.974979 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3" (UID: "34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.021442 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02" containerName="rabbitmq" containerID="cri-o://ec26fe58e71ed5e823c8c6c8ac6317b45cf155e256eebfa23596701c45b7ed26" gracePeriod=604797 Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.022886 4701 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-server-conf\") on node \"crc\" DevicePath \"\"" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.022920 4701 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.023037 4701 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.362307 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3","Type":"ContainerDied","Data":"2262d54691fb30547034c388144ba5bd93002a261e43134133f5c3f14bb77fe4"} Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.362377 4701 scope.go:117] "RemoveContainer" containerID="36ec982cd779dffc1e34f70f4637cd70db0da4c97841b60626b44a49cb751f98" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.362388 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.406269 4701 scope.go:117] "RemoveContainer" containerID="e4570b453a88dbab2e9d7e0002bd8c806baddbf864333070457a2d3f8f6d6688" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.497443 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.514400 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.542144 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 21 19:24:08 crc kubenswrapper[4701]: E1121 19:24:08.543190 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3" containerName="setup-container" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.543332 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3" containerName="setup-container" Nov 21 19:24:08 crc kubenswrapper[4701]: E1121 19:24:08.543427 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3" containerName="rabbitmq" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.543516 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3" containerName="rabbitmq" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.543955 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3" containerName="rabbitmq" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.545585 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.548838 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.549629 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.551217 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.552194 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.553481 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-lj7hl" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.553661 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.553830 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.553962 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.648920 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/fcd41199-949d-4c9f-9154-f83acb9bb997-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"fcd41199-949d-4c9f-9154-f83acb9bb997\") " pod="openstack/rabbitmq-server-0" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.649023 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/fcd41199-949d-4c9f-9154-f83acb9bb997-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"fcd41199-949d-4c9f-9154-f83acb9bb997\") " pod="openstack/rabbitmq-server-0" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.649052 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/fcd41199-949d-4c9f-9154-f83acb9bb997-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"fcd41199-949d-4c9f-9154-f83acb9bb997\") " pod="openstack/rabbitmq-server-0" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.649111 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/fcd41199-949d-4c9f-9154-f83acb9bb997-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"fcd41199-949d-4c9f-9154-f83acb9bb997\") " pod="openstack/rabbitmq-server-0" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.649150 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fcd41199-949d-4c9f-9154-f83acb9bb997-config-data\") pod \"rabbitmq-server-0\" (UID: \"fcd41199-949d-4c9f-9154-f83acb9bb997\") " pod="openstack/rabbitmq-server-0" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.649176 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/fcd41199-949d-4c9f-9154-f83acb9bb997-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"fcd41199-949d-4c9f-9154-f83acb9bb997\") " pod="openstack/rabbitmq-server-0" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.649260 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/fcd41199-949d-4c9f-9154-f83acb9bb997-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"fcd41199-949d-4c9f-9154-f83acb9bb997\") " pod="openstack/rabbitmq-server-0" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.649280 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"fcd41199-949d-4c9f-9154-f83acb9bb997\") " pod="openstack/rabbitmq-server-0" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.649311 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/fcd41199-949d-4c9f-9154-f83acb9bb997-server-conf\") pod \"rabbitmq-server-0\" (UID: \"fcd41199-949d-4c9f-9154-f83acb9bb997\") " pod="openstack/rabbitmq-server-0" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.649642 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nxrzk\" (UniqueName: \"kubernetes.io/projected/fcd41199-949d-4c9f-9154-f83acb9bb997-kube-api-access-nxrzk\") pod \"rabbitmq-server-0\" (UID: \"fcd41199-949d-4c9f-9154-f83acb9bb997\") " pod="openstack/rabbitmq-server-0" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.649899 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/fcd41199-949d-4c9f-9154-f83acb9bb997-pod-info\") pod \"rabbitmq-server-0\" (UID: \"fcd41199-949d-4c9f-9154-f83acb9bb997\") " pod="openstack/rabbitmq-server-0" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.751902 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/fcd41199-949d-4c9f-9154-f83acb9bb997-pod-info\") pod \"rabbitmq-server-0\" (UID: \"fcd41199-949d-4c9f-9154-f83acb9bb997\") " pod="openstack/rabbitmq-server-0" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.751953 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nxrzk\" (UniqueName: \"kubernetes.io/projected/fcd41199-949d-4c9f-9154-f83acb9bb997-kube-api-access-nxrzk\") pod \"rabbitmq-server-0\" (UID: \"fcd41199-949d-4c9f-9154-f83acb9bb997\") " pod="openstack/rabbitmq-server-0" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.752011 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/fcd41199-949d-4c9f-9154-f83acb9bb997-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"fcd41199-949d-4c9f-9154-f83acb9bb997\") " pod="openstack/rabbitmq-server-0" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.752055 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/fcd41199-949d-4c9f-9154-f83acb9bb997-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"fcd41199-949d-4c9f-9154-f83acb9bb997\") 
" pod="openstack/rabbitmq-server-0" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.752081 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/fcd41199-949d-4c9f-9154-f83acb9bb997-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"fcd41199-949d-4c9f-9154-f83acb9bb997\") " pod="openstack/rabbitmq-server-0" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.752151 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/fcd41199-949d-4c9f-9154-f83acb9bb997-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"fcd41199-949d-4c9f-9154-f83acb9bb997\") " pod="openstack/rabbitmq-server-0" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.752186 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fcd41199-949d-4c9f-9154-f83acb9bb997-config-data\") pod \"rabbitmq-server-0\" (UID: \"fcd41199-949d-4c9f-9154-f83acb9bb997\") " pod="openstack/rabbitmq-server-0" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.752232 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/fcd41199-949d-4c9f-9154-f83acb9bb997-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"fcd41199-949d-4c9f-9154-f83acb9bb997\") " pod="openstack/rabbitmq-server-0" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.752257 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/fcd41199-949d-4c9f-9154-f83acb9bb997-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"fcd41199-949d-4c9f-9154-f83acb9bb997\") " pod="openstack/rabbitmq-server-0" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.752283 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"fcd41199-949d-4c9f-9154-f83acb9bb997\") " pod="openstack/rabbitmq-server-0" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.752325 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/fcd41199-949d-4c9f-9154-f83acb9bb997-server-conf\") pod \"rabbitmq-server-0\" (UID: \"fcd41199-949d-4c9f-9154-f83acb9bb997\") " pod="openstack/rabbitmq-server-0" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.753793 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/fcd41199-949d-4c9f-9154-f83acb9bb997-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"fcd41199-949d-4c9f-9154-f83acb9bb997\") " pod="openstack/rabbitmq-server-0" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.754035 4701 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"fcd41199-949d-4c9f-9154-f83acb9bb997\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/rabbitmq-server-0" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.754158 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/fcd41199-949d-4c9f-9154-f83acb9bb997-config-data\") pod \"rabbitmq-server-0\" (UID: \"fcd41199-949d-4c9f-9154-f83acb9bb997\") " pod="openstack/rabbitmq-server-0" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.754217 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/fcd41199-949d-4c9f-9154-f83acb9bb997-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"fcd41199-949d-4c9f-9154-f83acb9bb997\") " pod="openstack/rabbitmq-server-0" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.754625 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/fcd41199-949d-4c9f-9154-f83acb9bb997-server-conf\") pod \"rabbitmq-server-0\" (UID: \"fcd41199-949d-4c9f-9154-f83acb9bb997\") " pod="openstack/rabbitmq-server-0" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.754846 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/fcd41199-949d-4c9f-9154-f83acb9bb997-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"fcd41199-949d-4c9f-9154-f83acb9bb997\") " pod="openstack/rabbitmq-server-0" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.759371 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/fcd41199-949d-4c9f-9154-f83acb9bb997-pod-info\") pod \"rabbitmq-server-0\" (UID: \"fcd41199-949d-4c9f-9154-f83acb9bb997\") " pod="openstack/rabbitmq-server-0" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.759763 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/fcd41199-949d-4c9f-9154-f83acb9bb997-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"fcd41199-949d-4c9f-9154-f83acb9bb997\") " pod="openstack/rabbitmq-server-0" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.775156 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/fcd41199-949d-4c9f-9154-f83acb9bb997-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"fcd41199-949d-4c9f-9154-f83acb9bb997\") " pod="openstack/rabbitmq-server-0" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.775986 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/fcd41199-949d-4c9f-9154-f83acb9bb997-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"fcd41199-949d-4c9f-9154-f83acb9bb997\") " pod="openstack/rabbitmq-server-0" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.782380 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nxrzk\" (UniqueName: \"kubernetes.io/projected/fcd41199-949d-4c9f-9154-f83acb9bb997-kube-api-access-nxrzk\") pod \"rabbitmq-server-0\" (UID: \"fcd41199-949d-4c9f-9154-f83acb9bb997\") " pod="openstack/rabbitmq-server-0" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.796787 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"fcd41199-949d-4c9f-9154-f83acb9bb997\") " pod="openstack/rabbitmq-server-0" Nov 21 19:24:08 crc kubenswrapper[4701]: I1121 19:24:08.869052 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 21 19:24:09 crc kubenswrapper[4701]: I1121 19:24:09.419177 4701 generic.go:334] "Generic (PLEG): container finished" podID="0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02" containerID="ec26fe58e71ed5e823c8c6c8ac6317b45cf155e256eebfa23596701c45b7ed26" exitCode=0 Nov 21 19:24:09 crc kubenswrapper[4701]: I1121 19:24:09.419268 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02","Type":"ContainerDied","Data":"ec26fe58e71ed5e823c8c6c8ac6317b45cf155e256eebfa23596701c45b7ed26"} Nov 21 19:24:09 crc kubenswrapper[4701]: I1121 19:24:09.946151 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:09 crc kubenswrapper[4701]: I1121 19:24:09.969786 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3" path="/var/lib/kubelet/pods/34a50e7c-5ef4-4882-9ecb-0b744f6d1ab3/volumes" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.047772 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.079452 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-rabbitmq-erlang-cookie\") pod \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.079707 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-rabbitmq-tls\") pod \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.079931 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-plugins-conf\") pod \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.080094 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-pod-info\") pod \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.080422 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-config-data\") pod \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.080565 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jn4kv\" (UniqueName: \"kubernetes.io/projected/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-kube-api-access-jn4kv\") pod \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.080727 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: 
\"kubernetes.io/configmap/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-server-conf\") pod \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.080889 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-rabbitmq-confd\") pod \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.081043 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-rabbitmq-plugins\") pod \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.081239 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-erlang-cookie-secret\") pod \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.081367 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\" (UID: \"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02\") " Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.082659 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02" (UID: "0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.082958 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02" (UID: "0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.086709 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-pod-info" (OuterVolumeSpecName: "pod-info") pod "0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02" (UID: "0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.086731 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02" (UID: "0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02"). InnerVolumeSpecName "rabbitmq-plugins". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.087327 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-kube-api-access-jn4kv" (OuterVolumeSpecName: "kube-api-access-jn4kv") pod "0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02" (UID: "0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02"). InnerVolumeSpecName "kube-api-access-jn4kv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.089670 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "persistence") pod "0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02" (UID: "0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.095223 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02" (UID: "0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.097266 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02" (UID: "0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.113220 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-config-data" (OuterVolumeSpecName: "config-data") pod "0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02" (UID: "0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.160500 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-server-conf" (OuterVolumeSpecName: "server-conf") pod "0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02" (UID: "0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.184155 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jn4kv\" (UniqueName: \"kubernetes.io/projected/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-kube-api-access-jn4kv\") on node \"crc\" DevicePath \"\"" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.184183 4701 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-server-conf\") on node \"crc\" DevicePath \"\"" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.184194 4701 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.184217 4701 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.184249 4701 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.184262 4701 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.184271 4701 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.184281 4701 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.184288 4701 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-pod-info\") on node \"crc\" DevicePath \"\"" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.184322 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.209973 4701 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.250131 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02" (UID: "0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.286132 4701 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.286176 4701 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.435027 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"fcd41199-949d-4c9f-9154-f83acb9bb997","Type":"ContainerStarted","Data":"cc62421917e697a6d0a623cff1e623cab55ccb2b54e0adc103698124fc65c691"} Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.437437 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02","Type":"ContainerDied","Data":"c9446e53b3e237c3c4a89e71c0867ff2fcbabe4608d2230c6fa8b41e098bc15a"} Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.437474 4701 scope.go:117] "RemoveContainer" containerID="ec26fe58e71ed5e823c8c6c8ac6317b45cf155e256eebfa23596701c45b7ed26" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.437591 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.467912 4701 scope.go:117] "RemoveContainer" containerID="22e1855bd9bfa3a229c0412c5188f4203d022bce21d3827cf5280bada0841afd" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.482259 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.490282 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.511598 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 21 19:24:10 crc kubenswrapper[4701]: E1121 19:24:10.512087 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02" containerName="setup-container" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.512100 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02" containerName="setup-container" Nov 21 19:24:10 crc kubenswrapper[4701]: E1121 19:24:10.512110 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02" containerName="rabbitmq" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.512116 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02" containerName="rabbitmq" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.512423 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02" containerName="rabbitmq" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.513600 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.519382 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.519726 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.519654 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.519712 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-27bkv" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.519722 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.523675 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.519762 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.548109 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.596138 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c53b35a3-36ed-43a5-a400-4658b9408596-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c53b35a3-36ed-43a5-a400-4658b9408596\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.596230 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c53b35a3-36ed-43a5-a400-4658b9408596-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"c53b35a3-36ed-43a5-a400-4658b9408596\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.596346 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c53b35a3-36ed-43a5-a400-4658b9408596-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"c53b35a3-36ed-43a5-a400-4658b9408596\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.596377 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c53b35a3-36ed-43a5-a400-4658b9408596-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"c53b35a3-36ed-43a5-a400-4658b9408596\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.596399 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c53b35a3-36ed-43a5-a400-4658b9408596-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"c53b35a3-36ed-43a5-a400-4658b9408596\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.596451 4701 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c53b35a3-36ed-43a5-a400-4658b9408596-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"c53b35a3-36ed-43a5-a400-4658b9408596\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.596472 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qr44s\" (UniqueName: \"kubernetes.io/projected/c53b35a3-36ed-43a5-a400-4658b9408596-kube-api-access-qr44s\") pod \"rabbitmq-cell1-server-0\" (UID: \"c53b35a3-36ed-43a5-a400-4658b9408596\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.596520 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c53b35a3-36ed-43a5-a400-4658b9408596-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"c53b35a3-36ed-43a5-a400-4658b9408596\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.596549 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"c53b35a3-36ed-43a5-a400-4658b9408596\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.596598 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c53b35a3-36ed-43a5-a400-4658b9408596-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"c53b35a3-36ed-43a5-a400-4658b9408596\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.596729 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c53b35a3-36ed-43a5-a400-4658b9408596-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c53b35a3-36ed-43a5-a400-4658b9408596\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.699828 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c53b35a3-36ed-43a5-a400-4658b9408596-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c53b35a3-36ed-43a5-a400-4658b9408596\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.699874 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c53b35a3-36ed-43a5-a400-4658b9408596-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"c53b35a3-36ed-43a5-a400-4658b9408596\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.699918 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c53b35a3-36ed-43a5-a400-4658b9408596-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"c53b35a3-36ed-43a5-a400-4658b9408596\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.699940 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c53b35a3-36ed-43a5-a400-4658b9408596-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"c53b35a3-36ed-43a5-a400-4658b9408596\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.699958 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c53b35a3-36ed-43a5-a400-4658b9408596-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"c53b35a3-36ed-43a5-a400-4658b9408596\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.699988 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c53b35a3-36ed-43a5-a400-4658b9408596-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"c53b35a3-36ed-43a5-a400-4658b9408596\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.700007 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qr44s\" (UniqueName: \"kubernetes.io/projected/c53b35a3-36ed-43a5-a400-4658b9408596-kube-api-access-qr44s\") pod \"rabbitmq-cell1-server-0\" (UID: \"c53b35a3-36ed-43a5-a400-4658b9408596\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.700031 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c53b35a3-36ed-43a5-a400-4658b9408596-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"c53b35a3-36ed-43a5-a400-4658b9408596\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.700052 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"c53b35a3-36ed-43a5-a400-4658b9408596\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.700079 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c53b35a3-36ed-43a5-a400-4658b9408596-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"c53b35a3-36ed-43a5-a400-4658b9408596\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.700129 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c53b35a3-36ed-43a5-a400-4658b9408596-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c53b35a3-36ed-43a5-a400-4658b9408596\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.701211 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c53b35a3-36ed-43a5-a400-4658b9408596-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c53b35a3-36ed-43a5-a400-4658b9408596\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.701512 4701 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"c53b35a3-36ed-43a5-a400-4658b9408596\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.701779 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c53b35a3-36ed-43a5-a400-4658b9408596-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"c53b35a3-36ed-43a5-a400-4658b9408596\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.702640 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c53b35a3-36ed-43a5-a400-4658b9408596-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c53b35a3-36ed-43a5-a400-4658b9408596\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.702747 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c53b35a3-36ed-43a5-a400-4658b9408596-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"c53b35a3-36ed-43a5-a400-4658b9408596\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.706678 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c53b35a3-36ed-43a5-a400-4658b9408596-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"c53b35a3-36ed-43a5-a400-4658b9408596\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.706842 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c53b35a3-36ed-43a5-a400-4658b9408596-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"c53b35a3-36ed-43a5-a400-4658b9408596\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.707224 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c53b35a3-36ed-43a5-a400-4658b9408596-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"c53b35a3-36ed-43a5-a400-4658b9408596\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.707543 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c53b35a3-36ed-43a5-a400-4658b9408596-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"c53b35a3-36ed-43a5-a400-4658b9408596\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.711244 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c53b35a3-36ed-43a5-a400-4658b9408596-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"c53b35a3-36ed-43a5-a400-4658b9408596\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.722240 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qr44s\" (UniqueName: \"kubernetes.io/projected/c53b35a3-36ed-43a5-a400-4658b9408596-kube-api-access-qr44s\") pod \"rabbitmq-cell1-server-0\" (UID: \"c53b35a3-36ed-43a5-a400-4658b9408596\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.734134 4701 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"c53b35a3-36ed-43a5-a400-4658b9408596\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:10 crc kubenswrapper[4701]: I1121 19:24:10.886939 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:11 crc kubenswrapper[4701]: I1121 19:24:11.431130 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 21 19:24:11 crc kubenswrapper[4701]: I1121 19:24:11.448092 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c53b35a3-36ed-43a5-a400-4658b9408596","Type":"ContainerStarted","Data":"dfbf2090cc6ac704eb55f0e395b4d6e847b343c6625a6e9d4250b9ddaa8c9ae6"} Nov 21 19:24:11 crc kubenswrapper[4701]: I1121 19:24:11.970833 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02" path="/var/lib/kubelet/pods/0f23fd8b-e3f0-4d0c-97fb-e0c0a5a37c02/volumes" Nov 21 19:24:12 crc kubenswrapper[4701]: I1121 19:24:12.482713 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"fcd41199-949d-4c9f-9154-f83acb9bb997","Type":"ContainerStarted","Data":"bc1c1d76dffe69c4705b949d820f18594e75795209c43265a9dc6d47b234474a"} Nov 21 19:24:14 crc kubenswrapper[4701]: I1121 19:24:14.511814 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c53b35a3-36ed-43a5-a400-4658b9408596","Type":"ContainerStarted","Data":"8eb430fe96874aea2f612a9a8af7058cd5706de37766ae79763a0d4bdc6b5c81"} Nov 21 19:24:17 crc kubenswrapper[4701]: I1121 19:24:17.010973 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5566b88579-29nl7"] Nov 21 19:24:17 crc kubenswrapper[4701]: I1121 19:24:17.024739 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5566b88579-29nl7" Nov 21 19:24:17 crc kubenswrapper[4701]: I1121 19:24:17.030452 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Nov 21 19:24:17 crc kubenswrapper[4701]: I1121 19:24:17.041415 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5566b88579-29nl7"] Nov 21 19:24:17 crc kubenswrapper[4701]: I1121 19:24:17.086192 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d127bd92-3fee-42b0-b36a-1ec82c50925b-config\") pod \"dnsmasq-dns-5566b88579-29nl7\" (UID: \"d127bd92-3fee-42b0-b36a-1ec82c50925b\") " pod="openstack/dnsmasq-dns-5566b88579-29nl7" Nov 21 19:24:17 crc kubenswrapper[4701]: I1121 19:24:17.086364 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d127bd92-3fee-42b0-b36a-1ec82c50925b-dns-svc\") pod \"dnsmasq-dns-5566b88579-29nl7\" (UID: \"d127bd92-3fee-42b0-b36a-1ec82c50925b\") " pod="openstack/dnsmasq-dns-5566b88579-29nl7" Nov 21 19:24:17 crc kubenswrapper[4701]: I1121 19:24:17.086426 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d127bd92-3fee-42b0-b36a-1ec82c50925b-ovsdbserver-nb\") pod \"dnsmasq-dns-5566b88579-29nl7\" (UID: \"d127bd92-3fee-42b0-b36a-1ec82c50925b\") " pod="openstack/dnsmasq-dns-5566b88579-29nl7" Nov 21 19:24:17 crc kubenswrapper[4701]: I1121 19:24:17.086560 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d127bd92-3fee-42b0-b36a-1ec82c50925b-dns-swift-storage-0\") pod \"dnsmasq-dns-5566b88579-29nl7\" (UID: \"d127bd92-3fee-42b0-b36a-1ec82c50925b\") " pod="openstack/dnsmasq-dns-5566b88579-29nl7" Nov 21 19:24:17 crc kubenswrapper[4701]: I1121 19:24:17.086646 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d127bd92-3fee-42b0-b36a-1ec82c50925b-ovsdbserver-sb\") pod \"dnsmasq-dns-5566b88579-29nl7\" (UID: \"d127bd92-3fee-42b0-b36a-1ec82c50925b\") " pod="openstack/dnsmasq-dns-5566b88579-29nl7" Nov 21 19:24:17 crc kubenswrapper[4701]: I1121 19:24:17.087271 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pszhl\" (UniqueName: \"kubernetes.io/projected/d127bd92-3fee-42b0-b36a-1ec82c50925b-kube-api-access-pszhl\") pod \"dnsmasq-dns-5566b88579-29nl7\" (UID: \"d127bd92-3fee-42b0-b36a-1ec82c50925b\") " pod="openstack/dnsmasq-dns-5566b88579-29nl7" Nov 21 19:24:17 crc kubenswrapper[4701]: I1121 19:24:17.087371 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/d127bd92-3fee-42b0-b36a-1ec82c50925b-openstack-edpm-ipam\") pod \"dnsmasq-dns-5566b88579-29nl7\" (UID: \"d127bd92-3fee-42b0-b36a-1ec82c50925b\") " pod="openstack/dnsmasq-dns-5566b88579-29nl7" Nov 21 19:24:17 crc kubenswrapper[4701]: I1121 19:24:17.190062 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d127bd92-3fee-42b0-b36a-1ec82c50925b-dns-swift-storage-0\") pod 
\"dnsmasq-dns-5566b88579-29nl7\" (UID: \"d127bd92-3fee-42b0-b36a-1ec82c50925b\") " pod="openstack/dnsmasq-dns-5566b88579-29nl7" Nov 21 19:24:17 crc kubenswrapper[4701]: I1121 19:24:17.190552 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d127bd92-3fee-42b0-b36a-1ec82c50925b-ovsdbserver-sb\") pod \"dnsmasq-dns-5566b88579-29nl7\" (UID: \"d127bd92-3fee-42b0-b36a-1ec82c50925b\") " pod="openstack/dnsmasq-dns-5566b88579-29nl7" Nov 21 19:24:17 crc kubenswrapper[4701]: I1121 19:24:17.190672 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pszhl\" (UniqueName: \"kubernetes.io/projected/d127bd92-3fee-42b0-b36a-1ec82c50925b-kube-api-access-pszhl\") pod \"dnsmasq-dns-5566b88579-29nl7\" (UID: \"d127bd92-3fee-42b0-b36a-1ec82c50925b\") " pod="openstack/dnsmasq-dns-5566b88579-29nl7" Nov 21 19:24:17 crc kubenswrapper[4701]: I1121 19:24:17.190703 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/d127bd92-3fee-42b0-b36a-1ec82c50925b-openstack-edpm-ipam\") pod \"dnsmasq-dns-5566b88579-29nl7\" (UID: \"d127bd92-3fee-42b0-b36a-1ec82c50925b\") " pod="openstack/dnsmasq-dns-5566b88579-29nl7" Nov 21 19:24:17 crc kubenswrapper[4701]: I1121 19:24:17.190750 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d127bd92-3fee-42b0-b36a-1ec82c50925b-config\") pod \"dnsmasq-dns-5566b88579-29nl7\" (UID: \"d127bd92-3fee-42b0-b36a-1ec82c50925b\") " pod="openstack/dnsmasq-dns-5566b88579-29nl7" Nov 21 19:24:17 crc kubenswrapper[4701]: I1121 19:24:17.190796 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d127bd92-3fee-42b0-b36a-1ec82c50925b-dns-svc\") pod \"dnsmasq-dns-5566b88579-29nl7\" (UID: \"d127bd92-3fee-42b0-b36a-1ec82c50925b\") " pod="openstack/dnsmasq-dns-5566b88579-29nl7" Nov 21 19:24:17 crc kubenswrapper[4701]: I1121 19:24:17.190837 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d127bd92-3fee-42b0-b36a-1ec82c50925b-ovsdbserver-nb\") pod \"dnsmasq-dns-5566b88579-29nl7\" (UID: \"d127bd92-3fee-42b0-b36a-1ec82c50925b\") " pod="openstack/dnsmasq-dns-5566b88579-29nl7" Nov 21 19:24:17 crc kubenswrapper[4701]: I1121 19:24:17.191661 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d127bd92-3fee-42b0-b36a-1ec82c50925b-ovsdbserver-sb\") pod \"dnsmasq-dns-5566b88579-29nl7\" (UID: \"d127bd92-3fee-42b0-b36a-1ec82c50925b\") " pod="openstack/dnsmasq-dns-5566b88579-29nl7" Nov 21 19:24:17 crc kubenswrapper[4701]: I1121 19:24:17.192007 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d127bd92-3fee-42b0-b36a-1ec82c50925b-ovsdbserver-nb\") pod \"dnsmasq-dns-5566b88579-29nl7\" (UID: \"d127bd92-3fee-42b0-b36a-1ec82c50925b\") " pod="openstack/dnsmasq-dns-5566b88579-29nl7" Nov 21 19:24:17 crc kubenswrapper[4701]: I1121 19:24:17.192192 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/d127bd92-3fee-42b0-b36a-1ec82c50925b-openstack-edpm-ipam\") pod \"dnsmasq-dns-5566b88579-29nl7\" (UID: 
\"d127bd92-3fee-42b0-b36a-1ec82c50925b\") " pod="openstack/dnsmasq-dns-5566b88579-29nl7" Nov 21 19:24:17 crc kubenswrapper[4701]: I1121 19:24:17.192687 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d127bd92-3fee-42b0-b36a-1ec82c50925b-dns-svc\") pod \"dnsmasq-dns-5566b88579-29nl7\" (UID: \"d127bd92-3fee-42b0-b36a-1ec82c50925b\") " pod="openstack/dnsmasq-dns-5566b88579-29nl7" Nov 21 19:24:17 crc kubenswrapper[4701]: I1121 19:24:17.192903 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d127bd92-3fee-42b0-b36a-1ec82c50925b-config\") pod \"dnsmasq-dns-5566b88579-29nl7\" (UID: \"d127bd92-3fee-42b0-b36a-1ec82c50925b\") " pod="openstack/dnsmasq-dns-5566b88579-29nl7" Nov 21 19:24:17 crc kubenswrapper[4701]: I1121 19:24:17.193726 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d127bd92-3fee-42b0-b36a-1ec82c50925b-dns-swift-storage-0\") pod \"dnsmasq-dns-5566b88579-29nl7\" (UID: \"d127bd92-3fee-42b0-b36a-1ec82c50925b\") " pod="openstack/dnsmasq-dns-5566b88579-29nl7" Nov 21 19:24:17 crc kubenswrapper[4701]: I1121 19:24:17.215475 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pszhl\" (UniqueName: \"kubernetes.io/projected/d127bd92-3fee-42b0-b36a-1ec82c50925b-kube-api-access-pszhl\") pod \"dnsmasq-dns-5566b88579-29nl7\" (UID: \"d127bd92-3fee-42b0-b36a-1ec82c50925b\") " pod="openstack/dnsmasq-dns-5566b88579-29nl7" Nov 21 19:24:17 crc kubenswrapper[4701]: I1121 19:24:17.376225 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5566b88579-29nl7" Nov 21 19:24:17 crc kubenswrapper[4701]: I1121 19:24:17.987793 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5566b88579-29nl7"] Nov 21 19:24:18 crc kubenswrapper[4701]: I1121 19:24:18.577927 4701 generic.go:334] "Generic (PLEG): container finished" podID="d127bd92-3fee-42b0-b36a-1ec82c50925b" containerID="d318b20a885b8a593a191a756a38f0c2b5617a5b704d7c0c6d44720e0c527972" exitCode=0 Nov 21 19:24:18 crc kubenswrapper[4701]: I1121 19:24:18.578004 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5566b88579-29nl7" event={"ID":"d127bd92-3fee-42b0-b36a-1ec82c50925b","Type":"ContainerDied","Data":"d318b20a885b8a593a191a756a38f0c2b5617a5b704d7c0c6d44720e0c527972"} Nov 21 19:24:18 crc kubenswrapper[4701]: I1121 19:24:18.578408 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5566b88579-29nl7" event={"ID":"d127bd92-3fee-42b0-b36a-1ec82c50925b","Type":"ContainerStarted","Data":"ddbfb64cf153b59f46ee08797079236874e99a52928578d97e9975e3a3bfb688"} Nov 21 19:24:18 crc kubenswrapper[4701]: I1121 19:24:18.614592 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 19:24:18 crc kubenswrapper[4701]: I1121 19:24:18.614711 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" Nov 21 19:24:19 crc kubenswrapper[4701]: I1121 19:24:19.596051 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5566b88579-29nl7" event={"ID":"d127bd92-3fee-42b0-b36a-1ec82c50925b","Type":"ContainerStarted","Data":"76bb965482e19afead922b216b14cf98b45a9b454ba7fc6b7f53f0ddc57f7898"} Nov 21 19:24:19 crc kubenswrapper[4701]: I1121 19:24:19.596693 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5566b88579-29nl7" Nov 21 19:24:19 crc kubenswrapper[4701]: I1121 19:24:19.623974 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5566b88579-29nl7" podStartSLOduration=3.623943498 podStartE2EDuration="3.623943498s" podCreationTimestamp="2025-11-21 19:24:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:24:19.621542903 +0000 UTC m=+1350.406682930" watchObservedRunningTime="2025-11-21 19:24:19.623943498 +0000 UTC m=+1350.409083555" Nov 21 19:24:27 crc kubenswrapper[4701]: I1121 19:24:27.378866 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5566b88579-29nl7" Nov 21 19:24:27 crc kubenswrapper[4701]: I1121 19:24:27.492284 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77cbdf4f4c-z72nc"] Nov 21 19:24:27 crc kubenswrapper[4701]: I1121 19:24:27.492999 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-77cbdf4f4c-z72nc" podUID="5bc9a4f7-dbee-40f5-abff-15038163a9a4" containerName="dnsmasq-dns" containerID="cri-o://e00f3227823f34af71cf8759d1fb4b560719497d2e5b56c9c4793581bd481ca0" gracePeriod=10 Nov 21 19:24:27 crc kubenswrapper[4701]: I1121 19:24:27.686715 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5894fd8d75-7xcgx"] Nov 21 19:24:27 crc kubenswrapper[4701]: I1121 19:24:27.689426 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5894fd8d75-7xcgx" Nov 21 19:24:27 crc kubenswrapper[4701]: I1121 19:24:27.696111 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5894fd8d75-7xcgx"] Nov 21 19:24:27 crc kubenswrapper[4701]: I1121 19:24:27.728120 4701 generic.go:334] "Generic (PLEG): container finished" podID="5bc9a4f7-dbee-40f5-abff-15038163a9a4" containerID="e00f3227823f34af71cf8759d1fb4b560719497d2e5b56c9c4793581bd481ca0" exitCode=0 Nov 21 19:24:27 crc kubenswrapper[4701]: I1121 19:24:27.728178 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77cbdf4f4c-z72nc" event={"ID":"5bc9a4f7-dbee-40f5-abff-15038163a9a4","Type":"ContainerDied","Data":"e00f3227823f34af71cf8759d1fb4b560719497d2e5b56c9c4793581bd481ca0"} Nov 21 19:24:27 crc kubenswrapper[4701]: I1121 19:24:27.795462 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7a22d8c3-ddf4-4901-b1c6-39a9099d1de6-dns-svc\") pod \"dnsmasq-dns-5894fd8d75-7xcgx\" (UID: \"7a22d8c3-ddf4-4901-b1c6-39a9099d1de6\") " pod="openstack/dnsmasq-dns-5894fd8d75-7xcgx" Nov 21 19:24:27 crc kubenswrapper[4701]: I1121 19:24:27.795976 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7a22d8c3-ddf4-4901-b1c6-39a9099d1de6-ovsdbserver-nb\") pod \"dnsmasq-dns-5894fd8d75-7xcgx\" (UID: \"7a22d8c3-ddf4-4901-b1c6-39a9099d1de6\") " pod="openstack/dnsmasq-dns-5894fd8d75-7xcgx" Nov 21 19:24:27 crc kubenswrapper[4701]: I1121 19:24:27.796015 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7a22d8c3-ddf4-4901-b1c6-39a9099d1de6-dns-swift-storage-0\") pod \"dnsmasq-dns-5894fd8d75-7xcgx\" (UID: \"7a22d8c3-ddf4-4901-b1c6-39a9099d1de6\") " pod="openstack/dnsmasq-dns-5894fd8d75-7xcgx" Nov 21 19:24:27 crc kubenswrapper[4701]: I1121 19:24:27.796047 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7a22d8c3-ddf4-4901-b1c6-39a9099d1de6-config\") pod \"dnsmasq-dns-5894fd8d75-7xcgx\" (UID: \"7a22d8c3-ddf4-4901-b1c6-39a9099d1de6\") " pod="openstack/dnsmasq-dns-5894fd8d75-7xcgx" Nov 21 19:24:27 crc kubenswrapper[4701]: I1121 19:24:27.796080 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7a22d8c3-ddf4-4901-b1c6-39a9099d1de6-ovsdbserver-sb\") pod \"dnsmasq-dns-5894fd8d75-7xcgx\" (UID: \"7a22d8c3-ddf4-4901-b1c6-39a9099d1de6\") " pod="openstack/dnsmasq-dns-5894fd8d75-7xcgx" Nov 21 19:24:27 crc kubenswrapper[4701]: I1121 19:24:27.796107 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/7a22d8c3-ddf4-4901-b1c6-39a9099d1de6-openstack-edpm-ipam\") pod \"dnsmasq-dns-5894fd8d75-7xcgx\" (UID: \"7a22d8c3-ddf4-4901-b1c6-39a9099d1de6\") " pod="openstack/dnsmasq-dns-5894fd8d75-7xcgx" Nov 21 19:24:27 crc kubenswrapper[4701]: I1121 19:24:27.796160 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wqwsh\" (UniqueName: \"kubernetes.io/projected/7a22d8c3-ddf4-4901-b1c6-39a9099d1de6-kube-api-access-wqwsh\") pod 
\"dnsmasq-dns-5894fd8d75-7xcgx\" (UID: \"7a22d8c3-ddf4-4901-b1c6-39a9099d1de6\") " pod="openstack/dnsmasq-dns-5894fd8d75-7xcgx" Nov 21 19:24:27 crc kubenswrapper[4701]: I1121 19:24:27.897816 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7a22d8c3-ddf4-4901-b1c6-39a9099d1de6-ovsdbserver-nb\") pod \"dnsmasq-dns-5894fd8d75-7xcgx\" (UID: \"7a22d8c3-ddf4-4901-b1c6-39a9099d1de6\") " pod="openstack/dnsmasq-dns-5894fd8d75-7xcgx" Nov 21 19:24:27 crc kubenswrapper[4701]: I1121 19:24:27.897867 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7a22d8c3-ddf4-4901-b1c6-39a9099d1de6-dns-swift-storage-0\") pod \"dnsmasq-dns-5894fd8d75-7xcgx\" (UID: \"7a22d8c3-ddf4-4901-b1c6-39a9099d1de6\") " pod="openstack/dnsmasq-dns-5894fd8d75-7xcgx" Nov 21 19:24:27 crc kubenswrapper[4701]: I1121 19:24:27.897899 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7a22d8c3-ddf4-4901-b1c6-39a9099d1de6-config\") pod \"dnsmasq-dns-5894fd8d75-7xcgx\" (UID: \"7a22d8c3-ddf4-4901-b1c6-39a9099d1de6\") " pod="openstack/dnsmasq-dns-5894fd8d75-7xcgx" Nov 21 19:24:27 crc kubenswrapper[4701]: I1121 19:24:27.897942 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7a22d8c3-ddf4-4901-b1c6-39a9099d1de6-ovsdbserver-sb\") pod \"dnsmasq-dns-5894fd8d75-7xcgx\" (UID: \"7a22d8c3-ddf4-4901-b1c6-39a9099d1de6\") " pod="openstack/dnsmasq-dns-5894fd8d75-7xcgx" Nov 21 19:24:27 crc kubenswrapper[4701]: I1121 19:24:27.897981 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/7a22d8c3-ddf4-4901-b1c6-39a9099d1de6-openstack-edpm-ipam\") pod \"dnsmasq-dns-5894fd8d75-7xcgx\" (UID: \"7a22d8c3-ddf4-4901-b1c6-39a9099d1de6\") " pod="openstack/dnsmasq-dns-5894fd8d75-7xcgx" Nov 21 19:24:27 crc kubenswrapper[4701]: I1121 19:24:27.898042 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wqwsh\" (UniqueName: \"kubernetes.io/projected/7a22d8c3-ddf4-4901-b1c6-39a9099d1de6-kube-api-access-wqwsh\") pod \"dnsmasq-dns-5894fd8d75-7xcgx\" (UID: \"7a22d8c3-ddf4-4901-b1c6-39a9099d1de6\") " pod="openstack/dnsmasq-dns-5894fd8d75-7xcgx" Nov 21 19:24:27 crc kubenswrapper[4701]: I1121 19:24:27.898110 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7a22d8c3-ddf4-4901-b1c6-39a9099d1de6-dns-svc\") pod \"dnsmasq-dns-5894fd8d75-7xcgx\" (UID: \"7a22d8c3-ddf4-4901-b1c6-39a9099d1de6\") " pod="openstack/dnsmasq-dns-5894fd8d75-7xcgx" Nov 21 19:24:27 crc kubenswrapper[4701]: I1121 19:24:27.898862 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7a22d8c3-ddf4-4901-b1c6-39a9099d1de6-ovsdbserver-nb\") pod \"dnsmasq-dns-5894fd8d75-7xcgx\" (UID: \"7a22d8c3-ddf4-4901-b1c6-39a9099d1de6\") " pod="openstack/dnsmasq-dns-5894fd8d75-7xcgx" Nov 21 19:24:27 crc kubenswrapper[4701]: I1121 19:24:27.898862 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7a22d8c3-ddf4-4901-b1c6-39a9099d1de6-dns-swift-storage-0\") pod \"dnsmasq-dns-5894fd8d75-7xcgx\" (UID: 
\"7a22d8c3-ddf4-4901-b1c6-39a9099d1de6\") " pod="openstack/dnsmasq-dns-5894fd8d75-7xcgx" Nov 21 19:24:27 crc kubenswrapper[4701]: I1121 19:24:27.899276 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7a22d8c3-ddf4-4901-b1c6-39a9099d1de6-dns-svc\") pod \"dnsmasq-dns-5894fd8d75-7xcgx\" (UID: \"7a22d8c3-ddf4-4901-b1c6-39a9099d1de6\") " pod="openstack/dnsmasq-dns-5894fd8d75-7xcgx" Nov 21 19:24:27 crc kubenswrapper[4701]: I1121 19:24:27.899435 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7a22d8c3-ddf4-4901-b1c6-39a9099d1de6-ovsdbserver-sb\") pod \"dnsmasq-dns-5894fd8d75-7xcgx\" (UID: \"7a22d8c3-ddf4-4901-b1c6-39a9099d1de6\") " pod="openstack/dnsmasq-dns-5894fd8d75-7xcgx" Nov 21 19:24:27 crc kubenswrapper[4701]: I1121 19:24:27.899506 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/7a22d8c3-ddf4-4901-b1c6-39a9099d1de6-openstack-edpm-ipam\") pod \"dnsmasq-dns-5894fd8d75-7xcgx\" (UID: \"7a22d8c3-ddf4-4901-b1c6-39a9099d1de6\") " pod="openstack/dnsmasq-dns-5894fd8d75-7xcgx" Nov 21 19:24:27 crc kubenswrapper[4701]: I1121 19:24:27.901122 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7a22d8c3-ddf4-4901-b1c6-39a9099d1de6-config\") pod \"dnsmasq-dns-5894fd8d75-7xcgx\" (UID: \"7a22d8c3-ddf4-4901-b1c6-39a9099d1de6\") " pod="openstack/dnsmasq-dns-5894fd8d75-7xcgx" Nov 21 19:24:27 crc kubenswrapper[4701]: I1121 19:24:27.959620 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wqwsh\" (UniqueName: \"kubernetes.io/projected/7a22d8c3-ddf4-4901-b1c6-39a9099d1de6-kube-api-access-wqwsh\") pod \"dnsmasq-dns-5894fd8d75-7xcgx\" (UID: \"7a22d8c3-ddf4-4901-b1c6-39a9099d1de6\") " pod="openstack/dnsmasq-dns-5894fd8d75-7xcgx" Nov 21 19:24:28 crc kubenswrapper[4701]: I1121 19:24:28.017010 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5894fd8d75-7xcgx" Nov 21 19:24:28 crc kubenswrapper[4701]: I1121 19:24:28.171136 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-77cbdf4f4c-z72nc" Nov 21 19:24:28 crc kubenswrapper[4701]: I1121 19:24:28.205693 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-phwzq\" (UniqueName: \"kubernetes.io/projected/5bc9a4f7-dbee-40f5-abff-15038163a9a4-kube-api-access-phwzq\") pod \"5bc9a4f7-dbee-40f5-abff-15038163a9a4\" (UID: \"5bc9a4f7-dbee-40f5-abff-15038163a9a4\") " Nov 21 19:24:28 crc kubenswrapper[4701]: I1121 19:24:28.205790 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5bc9a4f7-dbee-40f5-abff-15038163a9a4-config\") pod \"5bc9a4f7-dbee-40f5-abff-15038163a9a4\" (UID: \"5bc9a4f7-dbee-40f5-abff-15038163a9a4\") " Nov 21 19:24:28 crc kubenswrapper[4701]: I1121 19:24:28.205870 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5bc9a4f7-dbee-40f5-abff-15038163a9a4-dns-swift-storage-0\") pod \"5bc9a4f7-dbee-40f5-abff-15038163a9a4\" (UID: \"5bc9a4f7-dbee-40f5-abff-15038163a9a4\") " Nov 21 19:24:28 crc kubenswrapper[4701]: I1121 19:24:28.205919 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5bc9a4f7-dbee-40f5-abff-15038163a9a4-dns-svc\") pod \"5bc9a4f7-dbee-40f5-abff-15038163a9a4\" (UID: \"5bc9a4f7-dbee-40f5-abff-15038163a9a4\") " Nov 21 19:24:28 crc kubenswrapper[4701]: I1121 19:24:28.206009 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5bc9a4f7-dbee-40f5-abff-15038163a9a4-ovsdbserver-nb\") pod \"5bc9a4f7-dbee-40f5-abff-15038163a9a4\" (UID: \"5bc9a4f7-dbee-40f5-abff-15038163a9a4\") " Nov 21 19:24:28 crc kubenswrapper[4701]: I1121 19:24:28.206029 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5bc9a4f7-dbee-40f5-abff-15038163a9a4-ovsdbserver-sb\") pod \"5bc9a4f7-dbee-40f5-abff-15038163a9a4\" (UID: \"5bc9a4f7-dbee-40f5-abff-15038163a9a4\") " Nov 21 19:24:28 crc kubenswrapper[4701]: I1121 19:24:28.213227 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5bc9a4f7-dbee-40f5-abff-15038163a9a4-kube-api-access-phwzq" (OuterVolumeSpecName: "kube-api-access-phwzq") pod "5bc9a4f7-dbee-40f5-abff-15038163a9a4" (UID: "5bc9a4f7-dbee-40f5-abff-15038163a9a4"). InnerVolumeSpecName "kube-api-access-phwzq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:24:28 crc kubenswrapper[4701]: I1121 19:24:28.295353 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5bc9a4f7-dbee-40f5-abff-15038163a9a4-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "5bc9a4f7-dbee-40f5-abff-15038163a9a4" (UID: "5bc9a4f7-dbee-40f5-abff-15038163a9a4"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:24:28 crc kubenswrapper[4701]: I1121 19:24:28.306360 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5bc9a4f7-dbee-40f5-abff-15038163a9a4-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "5bc9a4f7-dbee-40f5-abff-15038163a9a4" (UID: "5bc9a4f7-dbee-40f5-abff-15038163a9a4"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:24:28 crc kubenswrapper[4701]: I1121 19:24:28.310508 4701 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5bc9a4f7-dbee-40f5-abff-15038163a9a4-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 21 19:24:28 crc kubenswrapper[4701]: I1121 19:24:28.310548 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-phwzq\" (UniqueName: \"kubernetes.io/projected/5bc9a4f7-dbee-40f5-abff-15038163a9a4-kube-api-access-phwzq\") on node \"crc\" DevicePath \"\"" Nov 21 19:24:28 crc kubenswrapper[4701]: I1121 19:24:28.310565 4701 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5bc9a4f7-dbee-40f5-abff-15038163a9a4-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 21 19:24:28 crc kubenswrapper[4701]: I1121 19:24:28.310779 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5bc9a4f7-dbee-40f5-abff-15038163a9a4-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "5bc9a4f7-dbee-40f5-abff-15038163a9a4" (UID: "5bc9a4f7-dbee-40f5-abff-15038163a9a4"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:24:28 crc kubenswrapper[4701]: I1121 19:24:28.325266 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5bc9a4f7-dbee-40f5-abff-15038163a9a4-config" (OuterVolumeSpecName: "config") pod "5bc9a4f7-dbee-40f5-abff-15038163a9a4" (UID: "5bc9a4f7-dbee-40f5-abff-15038163a9a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:24:28 crc kubenswrapper[4701]: I1121 19:24:28.325642 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5bc9a4f7-dbee-40f5-abff-15038163a9a4-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "5bc9a4f7-dbee-40f5-abff-15038163a9a4" (UID: "5bc9a4f7-dbee-40f5-abff-15038163a9a4"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:24:28 crc kubenswrapper[4701]: I1121 19:24:28.412822 4701 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5bc9a4f7-dbee-40f5-abff-15038163a9a4-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 21 19:24:28 crc kubenswrapper[4701]: I1121 19:24:28.412869 4701 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5bc9a4f7-dbee-40f5-abff-15038163a9a4-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:24:28 crc kubenswrapper[4701]: I1121 19:24:28.412887 4701 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5bc9a4f7-dbee-40f5-abff-15038163a9a4-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 19:24:28 crc kubenswrapper[4701]: I1121 19:24:28.574341 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5894fd8d75-7xcgx"] Nov 21 19:24:28 crc kubenswrapper[4701]: I1121 19:24:28.741719 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5894fd8d75-7xcgx" event={"ID":"7a22d8c3-ddf4-4901-b1c6-39a9099d1de6","Type":"ContainerStarted","Data":"b99bc506aaf4695807d52a88c6f9a27f48762fc3c93ba7f907abc6d9cbdacb9c"} Nov 21 19:24:28 crc kubenswrapper[4701]: I1121 19:24:28.745711 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77cbdf4f4c-z72nc" event={"ID":"5bc9a4f7-dbee-40f5-abff-15038163a9a4","Type":"ContainerDied","Data":"071ea593ce41110b292286e13c7a21efc6ad8ec3572f089eabf14d711119ca42"} Nov 21 19:24:28 crc kubenswrapper[4701]: I1121 19:24:28.745777 4701 scope.go:117] "RemoveContainer" containerID="e00f3227823f34af71cf8759d1fb4b560719497d2e5b56c9c4793581bd481ca0" Nov 21 19:24:28 crc kubenswrapper[4701]: I1121 19:24:28.745782 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-77cbdf4f4c-z72nc" Nov 21 19:24:28 crc kubenswrapper[4701]: I1121 19:24:28.799539 4701 scope.go:117] "RemoveContainer" containerID="88752033be19614c3741bc84cbade0dd93e11a2d41bc18edf4ed8a424babc476" Nov 21 19:24:28 crc kubenswrapper[4701]: I1121 19:24:28.848060 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77cbdf4f4c-z72nc"] Nov 21 19:24:28 crc kubenswrapper[4701]: I1121 19:24:28.857237 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-77cbdf4f4c-z72nc"] Nov 21 19:24:29 crc kubenswrapper[4701]: I1121 19:24:29.802898 4701 generic.go:334] "Generic (PLEG): container finished" podID="7a22d8c3-ddf4-4901-b1c6-39a9099d1de6" containerID="4f894bf9dd8d72cbccfe4264eee7410c7f47a594201ad3013422dde7e9cb7c74" exitCode=0 Nov 21 19:24:29 crc kubenswrapper[4701]: I1121 19:24:29.803345 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5894fd8d75-7xcgx" event={"ID":"7a22d8c3-ddf4-4901-b1c6-39a9099d1de6","Type":"ContainerDied","Data":"4f894bf9dd8d72cbccfe4264eee7410c7f47a594201ad3013422dde7e9cb7c74"} Nov 21 19:24:29 crc kubenswrapper[4701]: I1121 19:24:29.966045 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5bc9a4f7-dbee-40f5-abff-15038163a9a4" path="/var/lib/kubelet/pods/5bc9a4f7-dbee-40f5-abff-15038163a9a4/volumes" Nov 21 19:24:30 crc kubenswrapper[4701]: I1121 19:24:30.832342 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5894fd8d75-7xcgx" event={"ID":"7a22d8c3-ddf4-4901-b1c6-39a9099d1de6","Type":"ContainerStarted","Data":"dc0ea29c9af0034a5c41dadc833c0f60516700ff26f30370fe67ae619b04803b"} Nov 21 19:24:30 crc kubenswrapper[4701]: I1121 19:24:30.832947 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5894fd8d75-7xcgx" Nov 21 19:24:30 crc kubenswrapper[4701]: I1121 19:24:30.869929 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5894fd8d75-7xcgx" podStartSLOduration=3.869908352 podStartE2EDuration="3.869908352s" podCreationTimestamp="2025-11-21 19:24:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:24:30.861920627 +0000 UTC m=+1361.647060644" watchObservedRunningTime="2025-11-21 19:24:30.869908352 +0000 UTC m=+1361.655048379" Nov 21 19:24:38 crc kubenswrapper[4701]: I1121 19:24:38.019564 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5894fd8d75-7xcgx" Nov 21 19:24:38 crc kubenswrapper[4701]: I1121 19:24:38.128829 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5566b88579-29nl7"] Nov 21 19:24:38 crc kubenswrapper[4701]: I1121 19:24:38.129141 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5566b88579-29nl7" podUID="d127bd92-3fee-42b0-b36a-1ec82c50925b" containerName="dnsmasq-dns" containerID="cri-o://76bb965482e19afead922b216b14cf98b45a9b454ba7fc6b7f53f0ddc57f7898" gracePeriod=10 Nov 21 19:24:38 crc kubenswrapper[4701]: I1121 19:24:38.714511 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5566b88579-29nl7" Nov 21 19:24:38 crc kubenswrapper[4701]: I1121 19:24:38.835588 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d127bd92-3fee-42b0-b36a-1ec82c50925b-config\") pod \"d127bd92-3fee-42b0-b36a-1ec82c50925b\" (UID: \"d127bd92-3fee-42b0-b36a-1ec82c50925b\") " Nov 21 19:24:38 crc kubenswrapper[4701]: I1121 19:24:38.835776 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pszhl\" (UniqueName: \"kubernetes.io/projected/d127bd92-3fee-42b0-b36a-1ec82c50925b-kube-api-access-pszhl\") pod \"d127bd92-3fee-42b0-b36a-1ec82c50925b\" (UID: \"d127bd92-3fee-42b0-b36a-1ec82c50925b\") " Nov 21 19:24:38 crc kubenswrapper[4701]: I1121 19:24:38.835823 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d127bd92-3fee-42b0-b36a-1ec82c50925b-dns-svc\") pod \"d127bd92-3fee-42b0-b36a-1ec82c50925b\" (UID: \"d127bd92-3fee-42b0-b36a-1ec82c50925b\") " Nov 21 19:24:38 crc kubenswrapper[4701]: I1121 19:24:38.835886 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d127bd92-3fee-42b0-b36a-1ec82c50925b-dns-swift-storage-0\") pod \"d127bd92-3fee-42b0-b36a-1ec82c50925b\" (UID: \"d127bd92-3fee-42b0-b36a-1ec82c50925b\") " Nov 21 19:24:38 crc kubenswrapper[4701]: I1121 19:24:38.835985 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d127bd92-3fee-42b0-b36a-1ec82c50925b-ovsdbserver-nb\") pod \"d127bd92-3fee-42b0-b36a-1ec82c50925b\" (UID: \"d127bd92-3fee-42b0-b36a-1ec82c50925b\") " Nov 21 19:24:38 crc kubenswrapper[4701]: I1121 19:24:38.836110 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d127bd92-3fee-42b0-b36a-1ec82c50925b-ovsdbserver-sb\") pod \"d127bd92-3fee-42b0-b36a-1ec82c50925b\" (UID: \"d127bd92-3fee-42b0-b36a-1ec82c50925b\") " Nov 21 19:24:38 crc kubenswrapper[4701]: I1121 19:24:38.836159 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/d127bd92-3fee-42b0-b36a-1ec82c50925b-openstack-edpm-ipam\") pod \"d127bd92-3fee-42b0-b36a-1ec82c50925b\" (UID: \"d127bd92-3fee-42b0-b36a-1ec82c50925b\") " Nov 21 19:24:38 crc kubenswrapper[4701]: I1121 19:24:38.843852 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d127bd92-3fee-42b0-b36a-1ec82c50925b-kube-api-access-pszhl" (OuterVolumeSpecName: "kube-api-access-pszhl") pod "d127bd92-3fee-42b0-b36a-1ec82c50925b" (UID: "d127bd92-3fee-42b0-b36a-1ec82c50925b"). InnerVolumeSpecName "kube-api-access-pszhl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:24:38 crc kubenswrapper[4701]: I1121 19:24:38.891504 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d127bd92-3fee-42b0-b36a-1ec82c50925b-config" (OuterVolumeSpecName: "config") pod "d127bd92-3fee-42b0-b36a-1ec82c50925b" (UID: "d127bd92-3fee-42b0-b36a-1ec82c50925b"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:24:38 crc kubenswrapper[4701]: I1121 19:24:38.908350 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d127bd92-3fee-42b0-b36a-1ec82c50925b-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "d127bd92-3fee-42b0-b36a-1ec82c50925b" (UID: "d127bd92-3fee-42b0-b36a-1ec82c50925b"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:24:38 crc kubenswrapper[4701]: I1121 19:24:38.916968 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d127bd92-3fee-42b0-b36a-1ec82c50925b-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "d127bd92-3fee-42b0-b36a-1ec82c50925b" (UID: "d127bd92-3fee-42b0-b36a-1ec82c50925b"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:24:38 crc kubenswrapper[4701]: I1121 19:24:38.919512 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d127bd92-3fee-42b0-b36a-1ec82c50925b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d127bd92-3fee-42b0-b36a-1ec82c50925b" (UID: "d127bd92-3fee-42b0-b36a-1ec82c50925b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:24:38 crc kubenswrapper[4701]: I1121 19:24:38.939142 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d127bd92-3fee-42b0-b36a-1ec82c50925b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d127bd92-3fee-42b0-b36a-1ec82c50925b" (UID: "d127bd92-3fee-42b0-b36a-1ec82c50925b"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:24:38 crc kubenswrapper[4701]: I1121 19:24:38.939585 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d127bd92-3fee-42b0-b36a-1ec82c50925b-ovsdbserver-sb\") pod \"d127bd92-3fee-42b0-b36a-1ec82c50925b\" (UID: \"d127bd92-3fee-42b0-b36a-1ec82c50925b\") " Nov 21 19:24:38 crc kubenswrapper[4701]: W1121 19:24:38.939687 4701 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/d127bd92-3fee-42b0-b36a-1ec82c50925b/volumes/kubernetes.io~configmap/ovsdbserver-sb Nov 21 19:24:38 crc kubenswrapper[4701]: I1121 19:24:38.939701 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d127bd92-3fee-42b0-b36a-1ec82c50925b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d127bd92-3fee-42b0-b36a-1ec82c50925b" (UID: "d127bd92-3fee-42b0-b36a-1ec82c50925b"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:24:38 crc kubenswrapper[4701]: I1121 19:24:38.939771 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d127bd92-3fee-42b0-b36a-1ec82c50925b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d127bd92-3fee-42b0-b36a-1ec82c50925b" (UID: "d127bd92-3fee-42b0-b36a-1ec82c50925b"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:24:38 crc kubenswrapper[4701]: I1121 19:24:38.940347 4701 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d127bd92-3fee-42b0-b36a-1ec82c50925b-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:24:38 crc kubenswrapper[4701]: I1121 19:24:38.940378 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pszhl\" (UniqueName: \"kubernetes.io/projected/d127bd92-3fee-42b0-b36a-1ec82c50925b-kube-api-access-pszhl\") on node \"crc\" DevicePath \"\"" Nov 21 19:24:38 crc kubenswrapper[4701]: I1121 19:24:38.940394 4701 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d127bd92-3fee-42b0-b36a-1ec82c50925b-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 19:24:38 crc kubenswrapper[4701]: I1121 19:24:38.940409 4701 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d127bd92-3fee-42b0-b36a-1ec82c50925b-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 21 19:24:38 crc kubenswrapper[4701]: I1121 19:24:38.940422 4701 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d127bd92-3fee-42b0-b36a-1ec82c50925b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 21 19:24:38 crc kubenswrapper[4701]: I1121 19:24:38.940435 4701 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d127bd92-3fee-42b0-b36a-1ec82c50925b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 21 19:24:38 crc kubenswrapper[4701]: I1121 19:24:38.940447 4701 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/d127bd92-3fee-42b0-b36a-1ec82c50925b-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 21 19:24:38 crc kubenswrapper[4701]: I1121 19:24:38.973928 4701 generic.go:334] "Generic (PLEG): container finished" podID="d127bd92-3fee-42b0-b36a-1ec82c50925b" containerID="76bb965482e19afead922b216b14cf98b45a9b454ba7fc6b7f53f0ddc57f7898" exitCode=0 Nov 21 19:24:38 crc kubenswrapper[4701]: I1121 19:24:38.973990 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5566b88579-29nl7" event={"ID":"d127bd92-3fee-42b0-b36a-1ec82c50925b","Type":"ContainerDied","Data":"76bb965482e19afead922b216b14cf98b45a9b454ba7fc6b7f53f0ddc57f7898"} Nov 21 19:24:38 crc kubenswrapper[4701]: I1121 19:24:38.974074 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5566b88579-29nl7" event={"ID":"d127bd92-3fee-42b0-b36a-1ec82c50925b","Type":"ContainerDied","Data":"ddbfb64cf153b59f46ee08797079236874e99a52928578d97e9975e3a3bfb688"} Nov 21 19:24:38 crc kubenswrapper[4701]: I1121 19:24:38.974085 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5566b88579-29nl7" Nov 21 19:24:38 crc kubenswrapper[4701]: I1121 19:24:38.974101 4701 scope.go:117] "RemoveContainer" containerID="76bb965482e19afead922b216b14cf98b45a9b454ba7fc6b7f53f0ddc57f7898" Nov 21 19:24:39 crc kubenswrapper[4701]: I1121 19:24:39.002710 4701 scope.go:117] "RemoveContainer" containerID="d318b20a885b8a593a191a756a38f0c2b5617a5b704d7c0c6d44720e0c527972" Nov 21 19:24:39 crc kubenswrapper[4701]: I1121 19:24:39.024430 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5566b88579-29nl7"] Nov 21 19:24:39 crc kubenswrapper[4701]: I1121 19:24:39.037852 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5566b88579-29nl7"] Nov 21 19:24:39 crc kubenswrapper[4701]: I1121 19:24:39.051077 4701 scope.go:117] "RemoveContainer" containerID="76bb965482e19afead922b216b14cf98b45a9b454ba7fc6b7f53f0ddc57f7898" Nov 21 19:24:39 crc kubenswrapper[4701]: E1121 19:24:39.051735 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"76bb965482e19afead922b216b14cf98b45a9b454ba7fc6b7f53f0ddc57f7898\": container with ID starting with 76bb965482e19afead922b216b14cf98b45a9b454ba7fc6b7f53f0ddc57f7898 not found: ID does not exist" containerID="76bb965482e19afead922b216b14cf98b45a9b454ba7fc6b7f53f0ddc57f7898" Nov 21 19:24:39 crc kubenswrapper[4701]: I1121 19:24:39.051772 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"76bb965482e19afead922b216b14cf98b45a9b454ba7fc6b7f53f0ddc57f7898"} err="failed to get container status \"76bb965482e19afead922b216b14cf98b45a9b454ba7fc6b7f53f0ddc57f7898\": rpc error: code = NotFound desc = could not find container \"76bb965482e19afead922b216b14cf98b45a9b454ba7fc6b7f53f0ddc57f7898\": container with ID starting with 76bb965482e19afead922b216b14cf98b45a9b454ba7fc6b7f53f0ddc57f7898 not found: ID does not exist" Nov 21 19:24:39 crc kubenswrapper[4701]: I1121 19:24:39.051801 4701 scope.go:117] "RemoveContainer" containerID="d318b20a885b8a593a191a756a38f0c2b5617a5b704d7c0c6d44720e0c527972" Nov 21 19:24:39 crc kubenswrapper[4701]: E1121 19:24:39.052279 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d318b20a885b8a593a191a756a38f0c2b5617a5b704d7c0c6d44720e0c527972\": container with ID starting with d318b20a885b8a593a191a756a38f0c2b5617a5b704d7c0c6d44720e0c527972 not found: ID does not exist" containerID="d318b20a885b8a593a191a756a38f0c2b5617a5b704d7c0c6d44720e0c527972" Nov 21 19:24:39 crc kubenswrapper[4701]: I1121 19:24:39.052307 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d318b20a885b8a593a191a756a38f0c2b5617a5b704d7c0c6d44720e0c527972"} err="failed to get container status \"d318b20a885b8a593a191a756a38f0c2b5617a5b704d7c0c6d44720e0c527972\": rpc error: code = NotFound desc = could not find container \"d318b20a885b8a593a191a756a38f0c2b5617a5b704d7c0c6d44720e0c527972\": container with ID starting with d318b20a885b8a593a191a756a38f0c2b5617a5b704d7c0c6d44720e0c527972 not found: ID does not exist" Nov 21 19:24:39 crc kubenswrapper[4701]: I1121 19:24:39.967999 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d127bd92-3fee-42b0-b36a-1ec82c50925b" path="/var/lib/kubelet/pods/d127bd92-3fee-42b0-b36a-1ec82c50925b/volumes" Nov 21 19:24:46 crc kubenswrapper[4701]: I1121 19:24:46.080988 4701 
generic.go:334] "Generic (PLEG): container finished" podID="fcd41199-949d-4c9f-9154-f83acb9bb997" containerID="bc1c1d76dffe69c4705b949d820f18594e75795209c43265a9dc6d47b234474a" exitCode=0 Nov 21 19:24:46 crc kubenswrapper[4701]: I1121 19:24:46.081103 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"fcd41199-949d-4c9f-9154-f83acb9bb997","Type":"ContainerDied","Data":"bc1c1d76dffe69c4705b949d820f18594e75795209c43265a9dc6d47b234474a"} Nov 21 19:24:47 crc kubenswrapper[4701]: I1121 19:24:47.097586 4701 generic.go:334] "Generic (PLEG): container finished" podID="c53b35a3-36ed-43a5-a400-4658b9408596" containerID="8eb430fe96874aea2f612a9a8af7058cd5706de37766ae79763a0d4bdc6b5c81" exitCode=0 Nov 21 19:24:47 crc kubenswrapper[4701]: I1121 19:24:47.097670 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c53b35a3-36ed-43a5-a400-4658b9408596","Type":"ContainerDied","Data":"8eb430fe96874aea2f612a9a8af7058cd5706de37766ae79763a0d4bdc6b5c81"} Nov 21 19:24:47 crc kubenswrapper[4701]: I1121 19:24:47.103863 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"fcd41199-949d-4c9f-9154-f83acb9bb997","Type":"ContainerStarted","Data":"0f5758ad622cfeaf1b7fdc94b66f2b003bad68c8be29f61b07746a5536dc5c14"} Nov 21 19:24:47 crc kubenswrapper[4701]: I1121 19:24:47.104260 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 21 19:24:47 crc kubenswrapper[4701]: I1121 19:24:47.171243 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=39.171181702 podStartE2EDuration="39.171181702s" podCreationTimestamp="2025-11-21 19:24:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:24:47.157839163 +0000 UTC m=+1377.942979200" watchObservedRunningTime="2025-11-21 19:24:47.171181702 +0000 UTC m=+1377.956321769" Nov 21 19:24:48 crc kubenswrapper[4701]: I1121 19:24:48.119414 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c53b35a3-36ed-43a5-a400-4658b9408596","Type":"ContainerStarted","Data":"cdd45a0254db93783ae6e0a780e0f16c885fe781d62f7408952a4f29f4ee847f"} Nov 21 19:24:48 crc kubenswrapper[4701]: I1121 19:24:48.120508 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:24:48 crc kubenswrapper[4701]: I1121 19:24:48.169110 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=38.169069051 podStartE2EDuration="38.169069051s" podCreationTimestamp="2025-11-21 19:24:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:24:48.15194181 +0000 UTC m=+1378.937081837" watchObservedRunningTime="2025-11-21 19:24:48.169069051 +0000 UTC m=+1378.954209108" Nov 21 19:24:48 crc kubenswrapper[4701]: I1121 19:24:48.613571 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 19:24:48 crc kubenswrapper[4701]: I1121 19:24:48.613661 4701 
prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 19:24:48 crc kubenswrapper[4701]: I1121 19:24:48.613729 4701 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" Nov 21 19:24:48 crc kubenswrapper[4701]: I1121 19:24:48.615080 4701 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d6fe7caeaff234a352e7f7e2aad2e24b43f59b2b97fc616eef788494436369d1"} pod="openshift-machine-config-operator/machine-config-daemon-tbszf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 19:24:48 crc kubenswrapper[4701]: I1121 19:24:48.615165 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" containerID="cri-o://d6fe7caeaff234a352e7f7e2aad2e24b43f59b2b97fc616eef788494436369d1" gracePeriod=600 Nov 21 19:24:49 crc kubenswrapper[4701]: I1121 19:24:49.136149 4701 generic.go:334] "Generic (PLEG): container finished" podID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerID="d6fe7caeaff234a352e7f7e2aad2e24b43f59b2b97fc616eef788494436369d1" exitCode=0 Nov 21 19:24:49 crc kubenswrapper[4701]: I1121 19:24:49.136255 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerDied","Data":"d6fe7caeaff234a352e7f7e2aad2e24b43f59b2b97fc616eef788494436369d1"} Nov 21 19:24:49 crc kubenswrapper[4701]: I1121 19:24:49.137019 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerStarted","Data":"081447c4e1559ffc58e33e136a78fa7b343ef9791f855a491b27a72a49d8cde6"} Nov 21 19:24:49 crc kubenswrapper[4701]: I1121 19:24:49.137064 4701 scope.go:117] "RemoveContainer" containerID="0522a5d31d2783b232fd70ced5acfdf22c3becfa61b128f650faf72c65913cd6" Nov 21 19:24:56 crc kubenswrapper[4701]: I1121 19:24:56.456907 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6nz7n"] Nov 21 19:24:56 crc kubenswrapper[4701]: E1121 19:24:56.459535 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d127bd92-3fee-42b0-b36a-1ec82c50925b" containerName="init" Nov 21 19:24:56 crc kubenswrapper[4701]: I1121 19:24:56.459950 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="d127bd92-3fee-42b0-b36a-1ec82c50925b" containerName="init" Nov 21 19:24:56 crc kubenswrapper[4701]: E1121 19:24:56.459969 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d127bd92-3fee-42b0-b36a-1ec82c50925b" containerName="dnsmasq-dns" Nov 21 19:24:56 crc kubenswrapper[4701]: I1121 19:24:56.459977 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="d127bd92-3fee-42b0-b36a-1ec82c50925b" containerName="dnsmasq-dns" Nov 21 19:24:56 crc kubenswrapper[4701]: E1121 19:24:56.459998 4701 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="5bc9a4f7-dbee-40f5-abff-15038163a9a4" containerName="dnsmasq-dns" Nov 21 19:24:56 crc kubenswrapper[4701]: I1121 19:24:56.460008 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="5bc9a4f7-dbee-40f5-abff-15038163a9a4" containerName="dnsmasq-dns" Nov 21 19:24:56 crc kubenswrapper[4701]: E1121 19:24:56.460035 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5bc9a4f7-dbee-40f5-abff-15038163a9a4" containerName="init" Nov 21 19:24:56 crc kubenswrapper[4701]: I1121 19:24:56.460043 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="5bc9a4f7-dbee-40f5-abff-15038163a9a4" containerName="init" Nov 21 19:24:56 crc kubenswrapper[4701]: I1121 19:24:56.460335 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="5bc9a4f7-dbee-40f5-abff-15038163a9a4" containerName="dnsmasq-dns" Nov 21 19:24:56 crc kubenswrapper[4701]: I1121 19:24:56.460364 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="d127bd92-3fee-42b0-b36a-1ec82c50925b" containerName="dnsmasq-dns" Nov 21 19:24:56 crc kubenswrapper[4701]: I1121 19:24:56.461592 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6nz7n" Nov 21 19:24:56 crc kubenswrapper[4701]: I1121 19:24:56.464059 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 19:24:56 crc kubenswrapper[4701]: I1121 19:24:56.464502 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 19:24:56 crc kubenswrapper[4701]: I1121 19:24:56.466164 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 19:24:56 crc kubenswrapper[4701]: I1121 19:24:56.467479 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hqsfp" Nov 21 19:24:56 crc kubenswrapper[4701]: I1121 19:24:56.477469 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6nz7n"] Nov 21 19:24:56 crc kubenswrapper[4701]: I1121 19:24:56.631150 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/aa6fec42-0fdb-4b30-80b9-7cea4579dd05-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6nz7n\" (UID: \"aa6fec42-0fdb-4b30-80b9-7cea4579dd05\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6nz7n" Nov 21 19:24:56 crc kubenswrapper[4701]: I1121 19:24:56.631336 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/aa6fec42-0fdb-4b30-80b9-7cea4579dd05-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6nz7n\" (UID: \"aa6fec42-0fdb-4b30-80b9-7cea4579dd05\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6nz7n" Nov 21 19:24:56 crc kubenswrapper[4701]: I1121 19:24:56.631575 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nfqcc\" (UniqueName: \"kubernetes.io/projected/aa6fec42-0fdb-4b30-80b9-7cea4579dd05-kube-api-access-nfqcc\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6nz7n\" (UID: \"aa6fec42-0fdb-4b30-80b9-7cea4579dd05\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6nz7n" Nov 21 19:24:56 crc kubenswrapper[4701]: I1121 
19:24:56.631948 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa6fec42-0fdb-4b30-80b9-7cea4579dd05-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6nz7n\" (UID: \"aa6fec42-0fdb-4b30-80b9-7cea4579dd05\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6nz7n" Nov 21 19:24:56 crc kubenswrapper[4701]: I1121 19:24:56.734884 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/aa6fec42-0fdb-4b30-80b9-7cea4579dd05-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6nz7n\" (UID: \"aa6fec42-0fdb-4b30-80b9-7cea4579dd05\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6nz7n" Nov 21 19:24:56 crc kubenswrapper[4701]: I1121 19:24:56.735444 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nfqcc\" (UniqueName: \"kubernetes.io/projected/aa6fec42-0fdb-4b30-80b9-7cea4579dd05-kube-api-access-nfqcc\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6nz7n\" (UID: \"aa6fec42-0fdb-4b30-80b9-7cea4579dd05\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6nz7n" Nov 21 19:24:56 crc kubenswrapper[4701]: I1121 19:24:56.735713 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa6fec42-0fdb-4b30-80b9-7cea4579dd05-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6nz7n\" (UID: \"aa6fec42-0fdb-4b30-80b9-7cea4579dd05\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6nz7n" Nov 21 19:24:56 crc kubenswrapper[4701]: I1121 19:24:56.735958 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/aa6fec42-0fdb-4b30-80b9-7cea4579dd05-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6nz7n\" (UID: \"aa6fec42-0fdb-4b30-80b9-7cea4579dd05\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6nz7n" Nov 21 19:24:56 crc kubenswrapper[4701]: I1121 19:24:56.744812 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa6fec42-0fdb-4b30-80b9-7cea4579dd05-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6nz7n\" (UID: \"aa6fec42-0fdb-4b30-80b9-7cea4579dd05\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6nz7n" Nov 21 19:24:56 crc kubenswrapper[4701]: I1121 19:24:56.747733 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/aa6fec42-0fdb-4b30-80b9-7cea4579dd05-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6nz7n\" (UID: \"aa6fec42-0fdb-4b30-80b9-7cea4579dd05\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6nz7n" Nov 21 19:24:56 crc kubenswrapper[4701]: I1121 19:24:56.748946 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/aa6fec42-0fdb-4b30-80b9-7cea4579dd05-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6nz7n\" (UID: \"aa6fec42-0fdb-4b30-80b9-7cea4579dd05\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6nz7n" Nov 21 19:24:56 crc kubenswrapper[4701]: I1121 19:24:56.760193 4701 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nfqcc\" (UniqueName: \"kubernetes.io/projected/aa6fec42-0fdb-4b30-80b9-7cea4579dd05-kube-api-access-nfqcc\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6nz7n\" (UID: \"aa6fec42-0fdb-4b30-80b9-7cea4579dd05\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6nz7n" Nov 21 19:24:56 crc kubenswrapper[4701]: I1121 19:24:56.785661 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6nz7n" Nov 21 19:24:57 crc kubenswrapper[4701]: I1121 19:24:57.470272 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6nz7n"] Nov 21 19:24:58 crc kubenswrapper[4701]: I1121 19:24:58.254265 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6nz7n" event={"ID":"aa6fec42-0fdb-4b30-80b9-7cea4579dd05","Type":"ContainerStarted","Data":"cc468e7ccd1f0ec24143037e065b58306e92794cd1e06e7ffa742cd6770d250c"} Nov 21 19:24:58 crc kubenswrapper[4701]: I1121 19:24:58.871724 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="fcd41199-949d-4c9f-9154-f83acb9bb997" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.224:5671: connect: connection refused" Nov 21 19:25:00 crc kubenswrapper[4701]: I1121 19:25:00.892232 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="c53b35a3-36ed-43a5-a400-4658b9408596" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.225:5671: connect: connection refused" Nov 21 19:25:05 crc kubenswrapper[4701]: I1121 19:25:05.359432 4701 scope.go:117] "RemoveContainer" containerID="354bf0446dc64d7962956bdb2c8cc22f747a2ec9da2ed6d8a1d35605511c6387" Nov 21 19:25:06 crc kubenswrapper[4701]: I1121 19:25:06.850633 4701 scope.go:117] "RemoveContainer" containerID="23976f19ebe00d7956f6e0886f759c5975d5f1211cd74b2153d838ba0ee0d812" Nov 21 19:25:06 crc kubenswrapper[4701]: I1121 19:25:06.922140 4701 scope.go:117] "RemoveContainer" containerID="218230f11f6a75f5f8c3f8e1683007a2bc0d9c050f4727d2a8f170029f3651d3" Nov 21 19:25:07 crc kubenswrapper[4701]: I1121 19:25:07.267923 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-qt5rm"] Nov 21 19:25:07 crc kubenswrapper[4701]: I1121 19:25:07.271770 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qt5rm" Nov 21 19:25:07 crc kubenswrapper[4701]: I1121 19:25:07.283496 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qt5rm"] Nov 21 19:25:07 crc kubenswrapper[4701]: I1121 19:25:07.385972 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6nz7n" event={"ID":"aa6fec42-0fdb-4b30-80b9-7cea4579dd05","Type":"ContainerStarted","Data":"6a01e96b5c51e84c6b8f5599eaf3f049cf632fb40494646854fa40d889a45211"} Nov 21 19:25:07 crc kubenswrapper[4701]: I1121 19:25:07.423921 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6nz7n" podStartSLOduration=1.9646471060000001 podStartE2EDuration="11.423890444s" podCreationTimestamp="2025-11-21 19:24:56 +0000 UTC" firstStartedPulling="2025-11-21 19:24:57.474705219 +0000 UTC m=+1388.259845256" lastFinishedPulling="2025-11-21 19:25:06.933948547 +0000 UTC m=+1397.719088594" observedRunningTime="2025-11-21 19:25:07.409752763 +0000 UTC m=+1398.194892810" watchObservedRunningTime="2025-11-21 19:25:07.423890444 +0000 UTC m=+1398.209030481" Nov 21 19:25:07 crc kubenswrapper[4701]: I1121 19:25:07.448640 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nsz6c\" (UniqueName: \"kubernetes.io/projected/2adfbbf8-bdd1-4c31-9edf-9b56c50f7996-kube-api-access-nsz6c\") pod \"certified-operators-qt5rm\" (UID: \"2adfbbf8-bdd1-4c31-9edf-9b56c50f7996\") " pod="openshift-marketplace/certified-operators-qt5rm" Nov 21 19:25:07 crc kubenswrapper[4701]: I1121 19:25:07.448856 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2adfbbf8-bdd1-4c31-9edf-9b56c50f7996-utilities\") pod \"certified-operators-qt5rm\" (UID: \"2adfbbf8-bdd1-4c31-9edf-9b56c50f7996\") " pod="openshift-marketplace/certified-operators-qt5rm" Nov 21 19:25:07 crc kubenswrapper[4701]: I1121 19:25:07.448901 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2adfbbf8-bdd1-4c31-9edf-9b56c50f7996-catalog-content\") pod \"certified-operators-qt5rm\" (UID: \"2adfbbf8-bdd1-4c31-9edf-9b56c50f7996\") " pod="openshift-marketplace/certified-operators-qt5rm" Nov 21 19:25:07 crc kubenswrapper[4701]: I1121 19:25:07.551632 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nsz6c\" (UniqueName: \"kubernetes.io/projected/2adfbbf8-bdd1-4c31-9edf-9b56c50f7996-kube-api-access-nsz6c\") pod \"certified-operators-qt5rm\" (UID: \"2adfbbf8-bdd1-4c31-9edf-9b56c50f7996\") " pod="openshift-marketplace/certified-operators-qt5rm" Nov 21 19:25:07 crc kubenswrapper[4701]: I1121 19:25:07.552047 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2adfbbf8-bdd1-4c31-9edf-9b56c50f7996-utilities\") pod \"certified-operators-qt5rm\" (UID: \"2adfbbf8-bdd1-4c31-9edf-9b56c50f7996\") " pod="openshift-marketplace/certified-operators-qt5rm" Nov 21 19:25:07 crc kubenswrapper[4701]: I1121 19:25:07.552113 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2adfbbf8-bdd1-4c31-9edf-9b56c50f7996-catalog-content\") pod 
\"certified-operators-qt5rm\" (UID: \"2adfbbf8-bdd1-4c31-9edf-9b56c50f7996\") " pod="openshift-marketplace/certified-operators-qt5rm" Nov 21 19:25:07 crc kubenswrapper[4701]: I1121 19:25:07.552974 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2adfbbf8-bdd1-4c31-9edf-9b56c50f7996-utilities\") pod \"certified-operators-qt5rm\" (UID: \"2adfbbf8-bdd1-4c31-9edf-9b56c50f7996\") " pod="openshift-marketplace/certified-operators-qt5rm" Nov 21 19:25:07 crc kubenswrapper[4701]: I1121 19:25:07.553129 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2adfbbf8-bdd1-4c31-9edf-9b56c50f7996-catalog-content\") pod \"certified-operators-qt5rm\" (UID: \"2adfbbf8-bdd1-4c31-9edf-9b56c50f7996\") " pod="openshift-marketplace/certified-operators-qt5rm" Nov 21 19:25:07 crc kubenswrapper[4701]: I1121 19:25:07.587125 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nsz6c\" (UniqueName: \"kubernetes.io/projected/2adfbbf8-bdd1-4c31-9edf-9b56c50f7996-kube-api-access-nsz6c\") pod \"certified-operators-qt5rm\" (UID: \"2adfbbf8-bdd1-4c31-9edf-9b56c50f7996\") " pod="openshift-marketplace/certified-operators-qt5rm" Nov 21 19:25:07 crc kubenswrapper[4701]: I1121 19:25:07.591940 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qt5rm" Nov 21 19:25:08 crc kubenswrapper[4701]: I1121 19:25:08.090256 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qt5rm"] Nov 21 19:25:08 crc kubenswrapper[4701]: I1121 19:25:08.404623 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qt5rm" event={"ID":"2adfbbf8-bdd1-4c31-9edf-9b56c50f7996","Type":"ContainerStarted","Data":"a99e3bddfa5a6777204c0e06108b57a4566b2754df0ed270efe1eae54f7af007"} Nov 21 19:25:08 crc kubenswrapper[4701]: I1121 19:25:08.405302 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qt5rm" event={"ID":"2adfbbf8-bdd1-4c31-9edf-9b56c50f7996","Type":"ContainerStarted","Data":"928de74dd98f15eefb838cb970e05a9cba0fbb1c1b2634f3d9860b4746c12cf6"} Nov 21 19:25:08 crc kubenswrapper[4701]: I1121 19:25:08.871620 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 21 19:25:09 crc kubenswrapper[4701]: I1121 19:25:09.433787 4701 generic.go:334] "Generic (PLEG): container finished" podID="2adfbbf8-bdd1-4c31-9edf-9b56c50f7996" containerID="a99e3bddfa5a6777204c0e06108b57a4566b2754df0ed270efe1eae54f7af007" exitCode=0 Nov 21 19:25:09 crc kubenswrapper[4701]: I1121 19:25:09.434508 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qt5rm" event={"ID":"2adfbbf8-bdd1-4c31-9edf-9b56c50f7996","Type":"ContainerDied","Data":"a99e3bddfa5a6777204c0e06108b57a4566b2754df0ed270efe1eae54f7af007"} Nov 21 19:25:10 crc kubenswrapper[4701]: I1121 19:25:10.468280 4701 generic.go:334] "Generic (PLEG): container finished" podID="2adfbbf8-bdd1-4c31-9edf-9b56c50f7996" containerID="067d851ebce64c0b6667680f73be614705cb7e88a7fc11c5bf781a5f9233e1a4" exitCode=0 Nov 21 19:25:10 crc kubenswrapper[4701]: I1121 19:25:10.468679 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qt5rm" 
event={"ID":"2adfbbf8-bdd1-4c31-9edf-9b56c50f7996","Type":"ContainerDied","Data":"067d851ebce64c0b6667680f73be614705cb7e88a7fc11c5bf781a5f9233e1a4"} Nov 21 19:25:10 crc kubenswrapper[4701]: I1121 19:25:10.890534 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 21 19:25:11 crc kubenswrapper[4701]: I1121 19:25:11.489765 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qt5rm" event={"ID":"2adfbbf8-bdd1-4c31-9edf-9b56c50f7996","Type":"ContainerStarted","Data":"9206c75ab8bf592ff0f35e3a5be642bc9d04f0fe96073ad589db979defe312c5"} Nov 21 19:25:11 crc kubenswrapper[4701]: I1121 19:25:11.532833 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-qt5rm" podStartSLOduration=2.044086884 podStartE2EDuration="4.532802398s" podCreationTimestamp="2025-11-21 19:25:07 +0000 UTC" firstStartedPulling="2025-11-21 19:25:08.407184189 +0000 UTC m=+1399.192324256" lastFinishedPulling="2025-11-21 19:25:10.895899743 +0000 UTC m=+1401.681039770" observedRunningTime="2025-11-21 19:25:11.513648892 +0000 UTC m=+1402.298788939" watchObservedRunningTime="2025-11-21 19:25:11.532802398 +0000 UTC m=+1402.317942445" Nov 21 19:25:12 crc kubenswrapper[4701]: I1121 19:25:12.247802 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-kjphp"] Nov 21 19:25:12 crc kubenswrapper[4701]: I1121 19:25:12.251050 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kjphp" Nov 21 19:25:12 crc kubenswrapper[4701]: I1121 19:25:12.271370 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-kjphp"] Nov 21 19:25:12 crc kubenswrapper[4701]: I1121 19:25:12.379027 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6d369cb-b7aa-4c5f-add0-b06054062693-utilities\") pod \"redhat-operators-kjphp\" (UID: \"a6d369cb-b7aa-4c5f-add0-b06054062693\") " pod="openshift-marketplace/redhat-operators-kjphp" Nov 21 19:25:12 crc kubenswrapper[4701]: I1121 19:25:12.379507 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-csgx9\" (UniqueName: \"kubernetes.io/projected/a6d369cb-b7aa-4c5f-add0-b06054062693-kube-api-access-csgx9\") pod \"redhat-operators-kjphp\" (UID: \"a6d369cb-b7aa-4c5f-add0-b06054062693\") " pod="openshift-marketplace/redhat-operators-kjphp" Nov 21 19:25:12 crc kubenswrapper[4701]: I1121 19:25:12.379976 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6d369cb-b7aa-4c5f-add0-b06054062693-catalog-content\") pod \"redhat-operators-kjphp\" (UID: \"a6d369cb-b7aa-4c5f-add0-b06054062693\") " pod="openshift-marketplace/redhat-operators-kjphp" Nov 21 19:25:12 crc kubenswrapper[4701]: I1121 19:25:12.482710 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6d369cb-b7aa-4c5f-add0-b06054062693-catalog-content\") pod \"redhat-operators-kjphp\" (UID: \"a6d369cb-b7aa-4c5f-add0-b06054062693\") " pod="openshift-marketplace/redhat-operators-kjphp" Nov 21 19:25:12 crc kubenswrapper[4701]: I1121 19:25:12.482790 4701 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6d369cb-b7aa-4c5f-add0-b06054062693-utilities\") pod \"redhat-operators-kjphp\" (UID: \"a6d369cb-b7aa-4c5f-add0-b06054062693\") " pod="openshift-marketplace/redhat-operators-kjphp" Nov 21 19:25:12 crc kubenswrapper[4701]: I1121 19:25:12.482884 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-csgx9\" (UniqueName: \"kubernetes.io/projected/a6d369cb-b7aa-4c5f-add0-b06054062693-kube-api-access-csgx9\") pod \"redhat-operators-kjphp\" (UID: \"a6d369cb-b7aa-4c5f-add0-b06054062693\") " pod="openshift-marketplace/redhat-operators-kjphp" Nov 21 19:25:12 crc kubenswrapper[4701]: I1121 19:25:12.483632 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6d369cb-b7aa-4c5f-add0-b06054062693-catalog-content\") pod \"redhat-operators-kjphp\" (UID: \"a6d369cb-b7aa-4c5f-add0-b06054062693\") " pod="openshift-marketplace/redhat-operators-kjphp" Nov 21 19:25:12 crc kubenswrapper[4701]: I1121 19:25:12.483717 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6d369cb-b7aa-4c5f-add0-b06054062693-utilities\") pod \"redhat-operators-kjphp\" (UID: \"a6d369cb-b7aa-4c5f-add0-b06054062693\") " pod="openshift-marketplace/redhat-operators-kjphp" Nov 21 19:25:12 crc kubenswrapper[4701]: I1121 19:25:12.514701 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-csgx9\" (UniqueName: \"kubernetes.io/projected/a6d369cb-b7aa-4c5f-add0-b06054062693-kube-api-access-csgx9\") pod \"redhat-operators-kjphp\" (UID: \"a6d369cb-b7aa-4c5f-add0-b06054062693\") " pod="openshift-marketplace/redhat-operators-kjphp" Nov 21 19:25:12 crc kubenswrapper[4701]: I1121 19:25:12.576608 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-kjphp" Nov 21 19:25:13 crc kubenswrapper[4701]: I1121 19:25:13.117037 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-kjphp"] Nov 21 19:25:13 crc kubenswrapper[4701]: I1121 19:25:13.515255 4701 generic.go:334] "Generic (PLEG): container finished" podID="a6d369cb-b7aa-4c5f-add0-b06054062693" containerID="ac30f58fa4b5a9ee730432faa673b2ef98528e10601ca52cac9529dc98c42390" exitCode=0 Nov 21 19:25:13 crc kubenswrapper[4701]: I1121 19:25:13.515383 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kjphp" event={"ID":"a6d369cb-b7aa-4c5f-add0-b06054062693","Type":"ContainerDied","Data":"ac30f58fa4b5a9ee730432faa673b2ef98528e10601ca52cac9529dc98c42390"} Nov 21 19:25:13 crc kubenswrapper[4701]: I1121 19:25:13.515730 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kjphp" event={"ID":"a6d369cb-b7aa-4c5f-add0-b06054062693","Type":"ContainerStarted","Data":"b0fbb74d02f4088e33f445704301346d9b75b874d53629ab5610019223a7d175"} Nov 21 19:25:14 crc kubenswrapper[4701]: I1121 19:25:14.527864 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kjphp" event={"ID":"a6d369cb-b7aa-4c5f-add0-b06054062693","Type":"ContainerStarted","Data":"452479aa379882d5d504b0b849341f54985d24bf72346ce7bcc1eff2e9d347a3"} Nov 21 19:25:17 crc kubenswrapper[4701]: I1121 19:25:17.592832 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-qt5rm" Nov 21 19:25:17 crc kubenswrapper[4701]: I1121 19:25:17.593885 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-qt5rm" Nov 21 19:25:17 crc kubenswrapper[4701]: I1121 19:25:17.668982 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-qt5rm" Nov 21 19:25:18 crc kubenswrapper[4701]: I1121 19:25:18.576136 4701 generic.go:334] "Generic (PLEG): container finished" podID="a6d369cb-b7aa-4c5f-add0-b06054062693" containerID="452479aa379882d5d504b0b849341f54985d24bf72346ce7bcc1eff2e9d347a3" exitCode=0 Nov 21 19:25:18 crc kubenswrapper[4701]: I1121 19:25:18.576211 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kjphp" event={"ID":"a6d369cb-b7aa-4c5f-add0-b06054062693","Type":"ContainerDied","Data":"452479aa379882d5d504b0b849341f54985d24bf72346ce7bcc1eff2e9d347a3"} Nov 21 19:25:18 crc kubenswrapper[4701]: I1121 19:25:18.634644 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-qt5rm" Nov 21 19:25:19 crc kubenswrapper[4701]: I1121 19:25:19.591586 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kjphp" event={"ID":"a6d369cb-b7aa-4c5f-add0-b06054062693","Type":"ContainerStarted","Data":"5ccca81bba7298865fb11d2b127de7a4f45f5253dda6446c4ecffdee87347573"} Nov 21 19:25:19 crc kubenswrapper[4701]: I1121 19:25:19.594047 4701 generic.go:334] "Generic (PLEG): container finished" podID="aa6fec42-0fdb-4b30-80b9-7cea4579dd05" containerID="6a01e96b5c51e84c6b8f5599eaf3f049cf632fb40494646854fa40d889a45211" exitCode=0 Nov 21 19:25:19 crc kubenswrapper[4701]: I1121 19:25:19.594140 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6nz7n" event={"ID":"aa6fec42-0fdb-4b30-80b9-7cea4579dd05","Type":"ContainerDied","Data":"6a01e96b5c51e84c6b8f5599eaf3f049cf632fb40494646854fa40d889a45211"} Nov 21 19:25:19 crc kubenswrapper[4701]: I1121 19:25:19.625339 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-kjphp" podStartSLOduration=2.018492501 podStartE2EDuration="7.625320072s" podCreationTimestamp="2025-11-21 19:25:12 +0000 UTC" firstStartedPulling="2025-11-21 19:25:13.517099466 +0000 UTC m=+1404.302239493" lastFinishedPulling="2025-11-21 19:25:19.123926997 +0000 UTC m=+1409.909067064" observedRunningTime="2025-11-21 19:25:19.615235711 +0000 UTC m=+1410.400375738" watchObservedRunningTime="2025-11-21 19:25:19.625320072 +0000 UTC m=+1410.410460099" Nov 21 19:25:19 crc kubenswrapper[4701]: I1121 19:25:19.827136 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qt5rm"] Nov 21 19:25:20 crc kubenswrapper[4701]: I1121 19:25:20.605723 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-qt5rm" podUID="2adfbbf8-bdd1-4c31-9edf-9b56c50f7996" containerName="registry-server" containerID="cri-o://9206c75ab8bf592ff0f35e3a5be642bc9d04f0fe96073ad589db979defe312c5" gracePeriod=2 Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.268260 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6nz7n" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.382698 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qt5rm" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.394786 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa6fec42-0fdb-4b30-80b9-7cea4579dd05-repo-setup-combined-ca-bundle\") pod \"aa6fec42-0fdb-4b30-80b9-7cea4579dd05\" (UID: \"aa6fec42-0fdb-4b30-80b9-7cea4579dd05\") " Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.394896 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nfqcc\" (UniqueName: \"kubernetes.io/projected/aa6fec42-0fdb-4b30-80b9-7cea4579dd05-kube-api-access-nfqcc\") pod \"aa6fec42-0fdb-4b30-80b9-7cea4579dd05\" (UID: \"aa6fec42-0fdb-4b30-80b9-7cea4579dd05\") " Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.395121 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/aa6fec42-0fdb-4b30-80b9-7cea4579dd05-inventory\") pod \"aa6fec42-0fdb-4b30-80b9-7cea4579dd05\" (UID: \"aa6fec42-0fdb-4b30-80b9-7cea4579dd05\") " Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.395908 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/aa6fec42-0fdb-4b30-80b9-7cea4579dd05-ssh-key\") pod \"aa6fec42-0fdb-4b30-80b9-7cea4579dd05\" (UID: \"aa6fec42-0fdb-4b30-80b9-7cea4579dd05\") " Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.404458 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa6fec42-0fdb-4b30-80b9-7cea4579dd05-kube-api-access-nfqcc" (OuterVolumeSpecName: "kube-api-access-nfqcc") pod "aa6fec42-0fdb-4b30-80b9-7cea4579dd05" (UID: 
"aa6fec42-0fdb-4b30-80b9-7cea4579dd05"). InnerVolumeSpecName "kube-api-access-nfqcc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.443600 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa6fec42-0fdb-4b30-80b9-7cea4579dd05-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "aa6fec42-0fdb-4b30-80b9-7cea4579dd05" (UID: "aa6fec42-0fdb-4b30-80b9-7cea4579dd05"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.456686 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa6fec42-0fdb-4b30-80b9-7cea4579dd05-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "aa6fec42-0fdb-4b30-80b9-7cea4579dd05" (UID: "aa6fec42-0fdb-4b30-80b9-7cea4579dd05"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.478852 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa6fec42-0fdb-4b30-80b9-7cea4579dd05-inventory" (OuterVolumeSpecName: "inventory") pod "aa6fec42-0fdb-4b30-80b9-7cea4579dd05" (UID: "aa6fec42-0fdb-4b30-80b9-7cea4579dd05"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.501832 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nsz6c\" (UniqueName: \"kubernetes.io/projected/2adfbbf8-bdd1-4c31-9edf-9b56c50f7996-kube-api-access-nsz6c\") pod \"2adfbbf8-bdd1-4c31-9edf-9b56c50f7996\" (UID: \"2adfbbf8-bdd1-4c31-9edf-9b56c50f7996\") " Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.502073 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2adfbbf8-bdd1-4c31-9edf-9b56c50f7996-catalog-content\") pod \"2adfbbf8-bdd1-4c31-9edf-9b56c50f7996\" (UID: \"2adfbbf8-bdd1-4c31-9edf-9b56c50f7996\") " Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.502257 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2adfbbf8-bdd1-4c31-9edf-9b56c50f7996-utilities\") pod \"2adfbbf8-bdd1-4c31-9edf-9b56c50f7996\" (UID: \"2adfbbf8-bdd1-4c31-9edf-9b56c50f7996\") " Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.502915 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2adfbbf8-bdd1-4c31-9edf-9b56c50f7996-utilities" (OuterVolumeSpecName: "utilities") pod "2adfbbf8-bdd1-4c31-9edf-9b56c50f7996" (UID: "2adfbbf8-bdd1-4c31-9edf-9b56c50f7996"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.502968 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nfqcc\" (UniqueName: \"kubernetes.io/projected/aa6fec42-0fdb-4b30-80b9-7cea4579dd05-kube-api-access-nfqcc\") on node \"crc\" DevicePath \"\"" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.502987 4701 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/aa6fec42-0fdb-4b30-80b9-7cea4579dd05-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.503001 4701 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/aa6fec42-0fdb-4b30-80b9-7cea4579dd05-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.503013 4701 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa6fec42-0fdb-4b30-80b9-7cea4579dd05-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.506260 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2adfbbf8-bdd1-4c31-9edf-9b56c50f7996-kube-api-access-nsz6c" (OuterVolumeSpecName: "kube-api-access-nsz6c") pod "2adfbbf8-bdd1-4c31-9edf-9b56c50f7996" (UID: "2adfbbf8-bdd1-4c31-9edf-9b56c50f7996"). InnerVolumeSpecName "kube-api-access-nsz6c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.569830 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2adfbbf8-bdd1-4c31-9edf-9b56c50f7996-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2adfbbf8-bdd1-4c31-9edf-9b56c50f7996" (UID: "2adfbbf8-bdd1-4c31-9edf-9b56c50f7996"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.605692 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2adfbbf8-bdd1-4c31-9edf-9b56c50f7996-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.605736 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2adfbbf8-bdd1-4c31-9edf-9b56c50f7996-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.605749 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nsz6c\" (UniqueName: \"kubernetes.io/projected/2adfbbf8-bdd1-4c31-9edf-9b56c50f7996-kube-api-access-nsz6c\") on node \"crc\" DevicePath \"\"" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.618843 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6nz7n" event={"ID":"aa6fec42-0fdb-4b30-80b9-7cea4579dd05","Type":"ContainerDied","Data":"cc468e7ccd1f0ec24143037e065b58306e92794cd1e06e7ffa742cd6770d250c"} Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.618916 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6nz7n" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.618932 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cc468e7ccd1f0ec24143037e065b58306e92794cd1e06e7ffa742cd6770d250c" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.622176 4701 generic.go:334] "Generic (PLEG): container finished" podID="2adfbbf8-bdd1-4c31-9edf-9b56c50f7996" containerID="9206c75ab8bf592ff0f35e3a5be642bc9d04f0fe96073ad589db979defe312c5" exitCode=0 Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.622288 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qt5rm" event={"ID":"2adfbbf8-bdd1-4c31-9edf-9b56c50f7996","Type":"ContainerDied","Data":"9206c75ab8bf592ff0f35e3a5be642bc9d04f0fe96073ad589db979defe312c5"} Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.622307 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qt5rm" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.622334 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qt5rm" event={"ID":"2adfbbf8-bdd1-4c31-9edf-9b56c50f7996","Type":"ContainerDied","Data":"928de74dd98f15eefb838cb970e05a9cba0fbb1c1b2634f3d9860b4746c12cf6"} Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.622388 4701 scope.go:117] "RemoveContainer" containerID="9206c75ab8bf592ff0f35e3a5be642bc9d04f0fe96073ad589db979defe312c5" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.662274 4701 scope.go:117] "RemoveContainer" containerID="067d851ebce64c0b6667680f73be614705cb7e88a7fc11c5bf781a5f9233e1a4" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.686045 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qt5rm"] Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.700716 4701 scope.go:117] "RemoveContainer" containerID="a99e3bddfa5a6777204c0e06108b57a4566b2754df0ed270efe1eae54f7af007" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.712587 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-qt5rm"] Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.744678 4701 scope.go:117] "RemoveContainer" containerID="9206c75ab8bf592ff0f35e3a5be642bc9d04f0fe96073ad589db979defe312c5" Nov 21 19:25:21 crc kubenswrapper[4701]: E1121 19:25:21.746575 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9206c75ab8bf592ff0f35e3a5be642bc9d04f0fe96073ad589db979defe312c5\": container with ID starting with 9206c75ab8bf592ff0f35e3a5be642bc9d04f0fe96073ad589db979defe312c5 not found: ID does not exist" containerID="9206c75ab8bf592ff0f35e3a5be642bc9d04f0fe96073ad589db979defe312c5" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.746636 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9206c75ab8bf592ff0f35e3a5be642bc9d04f0fe96073ad589db979defe312c5"} err="failed to get container status \"9206c75ab8bf592ff0f35e3a5be642bc9d04f0fe96073ad589db979defe312c5\": rpc error: code = NotFound desc = could not find container \"9206c75ab8bf592ff0f35e3a5be642bc9d04f0fe96073ad589db979defe312c5\": container with ID starting with 9206c75ab8bf592ff0f35e3a5be642bc9d04f0fe96073ad589db979defe312c5 not found: ID does not exist" Nov 21 19:25:21 crc 
kubenswrapper[4701]: I1121 19:25:21.746661 4701 scope.go:117] "RemoveContainer" containerID="067d851ebce64c0b6667680f73be614705cb7e88a7fc11c5bf781a5f9233e1a4" Nov 21 19:25:21 crc kubenswrapper[4701]: E1121 19:25:21.749113 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"067d851ebce64c0b6667680f73be614705cb7e88a7fc11c5bf781a5f9233e1a4\": container with ID starting with 067d851ebce64c0b6667680f73be614705cb7e88a7fc11c5bf781a5f9233e1a4 not found: ID does not exist" containerID="067d851ebce64c0b6667680f73be614705cb7e88a7fc11c5bf781a5f9233e1a4" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.749174 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"067d851ebce64c0b6667680f73be614705cb7e88a7fc11c5bf781a5f9233e1a4"} err="failed to get container status \"067d851ebce64c0b6667680f73be614705cb7e88a7fc11c5bf781a5f9233e1a4\": rpc error: code = NotFound desc = could not find container \"067d851ebce64c0b6667680f73be614705cb7e88a7fc11c5bf781a5f9233e1a4\": container with ID starting with 067d851ebce64c0b6667680f73be614705cb7e88a7fc11c5bf781a5f9233e1a4 not found: ID does not exist" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.749232 4701 scope.go:117] "RemoveContainer" containerID="a99e3bddfa5a6777204c0e06108b57a4566b2754df0ed270efe1eae54f7af007" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.752327 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-8vgkh"] Nov 21 19:25:21 crc kubenswrapper[4701]: E1121 19:25:21.752865 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2adfbbf8-bdd1-4c31-9edf-9b56c50f7996" containerName="extract-content" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.752893 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="2adfbbf8-bdd1-4c31-9edf-9b56c50f7996" containerName="extract-content" Nov 21 19:25:21 crc kubenswrapper[4701]: E1121 19:25:21.752941 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa6fec42-0fdb-4b30-80b9-7cea4579dd05" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.752951 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa6fec42-0fdb-4b30-80b9-7cea4579dd05" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 21 19:25:21 crc kubenswrapper[4701]: E1121 19:25:21.752964 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2adfbbf8-bdd1-4c31-9edf-9b56c50f7996" containerName="registry-server" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.752971 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="2adfbbf8-bdd1-4c31-9edf-9b56c50f7996" containerName="registry-server" Nov 21 19:25:21 crc kubenswrapper[4701]: E1121 19:25:21.752985 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2adfbbf8-bdd1-4c31-9edf-9b56c50f7996" containerName="extract-utilities" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.752991 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="2adfbbf8-bdd1-4c31-9edf-9b56c50f7996" containerName="extract-utilities" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.753190 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa6fec42-0fdb-4b30-80b9-7cea4579dd05" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.753382 4701 
memory_manager.go:354] "RemoveStaleState removing state" podUID="2adfbbf8-bdd1-4c31-9edf-9b56c50f7996" containerName="registry-server" Nov 21 19:25:21 crc kubenswrapper[4701]: E1121 19:25:21.753917 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a99e3bddfa5a6777204c0e06108b57a4566b2754df0ed270efe1eae54f7af007\": container with ID starting with a99e3bddfa5a6777204c0e06108b57a4566b2754df0ed270efe1eae54f7af007 not found: ID does not exist" containerID="a99e3bddfa5a6777204c0e06108b57a4566b2754df0ed270efe1eae54f7af007" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.753955 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a99e3bddfa5a6777204c0e06108b57a4566b2754df0ed270efe1eae54f7af007"} err="failed to get container status \"a99e3bddfa5a6777204c0e06108b57a4566b2754df0ed270efe1eae54f7af007\": rpc error: code = NotFound desc = could not find container \"a99e3bddfa5a6777204c0e06108b57a4566b2754df0ed270efe1eae54f7af007\": container with ID starting with a99e3bddfa5a6777204c0e06108b57a4566b2754df0ed270efe1eae54f7af007 not found: ID does not exist" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.754285 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-8vgkh" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.757825 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.759248 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.759942 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.760251 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hqsfp" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.764626 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-8vgkh"] Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.810229 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c3df9720-470f-4076-93ad-cd09d2b8c1d4-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-8vgkh\" (UID: \"c3df9720-470f-4076-93ad-cd09d2b8c1d4\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-8vgkh" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.810283 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kzkrb\" (UniqueName: \"kubernetes.io/projected/c3df9720-470f-4076-93ad-cd09d2b8c1d4-kube-api-access-kzkrb\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-8vgkh\" (UID: \"c3df9720-470f-4076-93ad-cd09d2b8c1d4\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-8vgkh" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.810323 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c3df9720-470f-4076-93ad-cd09d2b8c1d4-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-8vgkh\" (UID: 
\"c3df9720-470f-4076-93ad-cd09d2b8c1d4\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-8vgkh" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.912936 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c3df9720-470f-4076-93ad-cd09d2b8c1d4-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-8vgkh\" (UID: \"c3df9720-470f-4076-93ad-cd09d2b8c1d4\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-8vgkh" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.913003 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kzkrb\" (UniqueName: \"kubernetes.io/projected/c3df9720-470f-4076-93ad-cd09d2b8c1d4-kube-api-access-kzkrb\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-8vgkh\" (UID: \"c3df9720-470f-4076-93ad-cd09d2b8c1d4\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-8vgkh" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.913050 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c3df9720-470f-4076-93ad-cd09d2b8c1d4-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-8vgkh\" (UID: \"c3df9720-470f-4076-93ad-cd09d2b8c1d4\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-8vgkh" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.917990 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c3df9720-470f-4076-93ad-cd09d2b8c1d4-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-8vgkh\" (UID: \"c3df9720-470f-4076-93ad-cd09d2b8c1d4\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-8vgkh" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.919244 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c3df9720-470f-4076-93ad-cd09d2b8c1d4-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-8vgkh\" (UID: \"c3df9720-470f-4076-93ad-cd09d2b8c1d4\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-8vgkh" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.931978 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kzkrb\" (UniqueName: \"kubernetes.io/projected/c3df9720-470f-4076-93ad-cd09d2b8c1d4-kube-api-access-kzkrb\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-8vgkh\" (UID: \"c3df9720-470f-4076-93ad-cd09d2b8c1d4\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-8vgkh" Nov 21 19:25:21 crc kubenswrapper[4701]: I1121 19:25:21.964786 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2adfbbf8-bdd1-4c31-9edf-9b56c50f7996" path="/var/lib/kubelet/pods/2adfbbf8-bdd1-4c31-9edf-9b56c50f7996/volumes" Nov 21 19:25:22 crc kubenswrapper[4701]: I1121 19:25:22.119085 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-8vgkh" Nov 21 19:25:22 crc kubenswrapper[4701]: I1121 19:25:22.568847 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-8vgkh"] Nov 21 19:25:22 crc kubenswrapper[4701]: I1121 19:25:22.576771 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-kjphp" Nov 21 19:25:22 crc kubenswrapper[4701]: I1121 19:25:22.576817 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-kjphp" Nov 21 19:25:22 crc kubenswrapper[4701]: I1121 19:25:22.641156 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-8vgkh" event={"ID":"c3df9720-470f-4076-93ad-cd09d2b8c1d4","Type":"ContainerStarted","Data":"048c162d13d9689b33ff85ef4fd432f8c3526b13e65431c4d08ef8e3ecf8aac1"} Nov 21 19:25:23 crc kubenswrapper[4701]: I1121 19:25:23.651060 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-kjphp" podUID="a6d369cb-b7aa-4c5f-add0-b06054062693" containerName="registry-server" probeResult="failure" output=< Nov 21 19:25:23 crc kubenswrapper[4701]: timeout: failed to connect service ":50051" within 1s Nov 21 19:25:23 crc kubenswrapper[4701]: > Nov 21 19:25:23 crc kubenswrapper[4701]: I1121 19:25:23.654010 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-8vgkh" event={"ID":"c3df9720-470f-4076-93ad-cd09d2b8c1d4","Type":"ContainerStarted","Data":"f67b8dc06ffda6a8d0cca9d12a6b4eb1140089d048ae9a77b88aa98bc45ee58f"} Nov 21 19:25:23 crc kubenswrapper[4701]: I1121 19:25:23.677479 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-8vgkh" podStartSLOduration=2.258858342 podStartE2EDuration="2.677457448s" podCreationTimestamp="2025-11-21 19:25:21 +0000 UTC" firstStartedPulling="2025-11-21 19:25:22.569535075 +0000 UTC m=+1413.354675092" lastFinishedPulling="2025-11-21 19:25:22.988134171 +0000 UTC m=+1413.773274198" observedRunningTime="2025-11-21 19:25:23.670650765 +0000 UTC m=+1414.455790792" watchObservedRunningTime="2025-11-21 19:25:23.677457448 +0000 UTC m=+1414.462597475" Nov 21 19:25:26 crc kubenswrapper[4701]: I1121 19:25:26.690933 4701 generic.go:334] "Generic (PLEG): container finished" podID="c3df9720-470f-4076-93ad-cd09d2b8c1d4" containerID="f67b8dc06ffda6a8d0cca9d12a6b4eb1140089d048ae9a77b88aa98bc45ee58f" exitCode=0 Nov 21 19:25:26 crc kubenswrapper[4701]: I1121 19:25:26.691084 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-8vgkh" event={"ID":"c3df9720-470f-4076-93ad-cd09d2b8c1d4","Type":"ContainerDied","Data":"f67b8dc06ffda6a8d0cca9d12a6b4eb1140089d048ae9a77b88aa98bc45ee58f"} Nov 21 19:25:28 crc kubenswrapper[4701]: I1121 19:25:28.143259 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-8vgkh" Nov 21 19:25:28 crc kubenswrapper[4701]: I1121 19:25:28.181937 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kzkrb\" (UniqueName: \"kubernetes.io/projected/c3df9720-470f-4076-93ad-cd09d2b8c1d4-kube-api-access-kzkrb\") pod \"c3df9720-470f-4076-93ad-cd09d2b8c1d4\" (UID: \"c3df9720-470f-4076-93ad-cd09d2b8c1d4\") " Nov 21 19:25:28 crc kubenswrapper[4701]: I1121 19:25:28.182261 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c3df9720-470f-4076-93ad-cd09d2b8c1d4-inventory\") pod \"c3df9720-470f-4076-93ad-cd09d2b8c1d4\" (UID: \"c3df9720-470f-4076-93ad-cd09d2b8c1d4\") " Nov 21 19:25:28 crc kubenswrapper[4701]: I1121 19:25:28.182375 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c3df9720-470f-4076-93ad-cd09d2b8c1d4-ssh-key\") pod \"c3df9720-470f-4076-93ad-cd09d2b8c1d4\" (UID: \"c3df9720-470f-4076-93ad-cd09d2b8c1d4\") " Nov 21 19:25:28 crc kubenswrapper[4701]: I1121 19:25:28.207103 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c3df9720-470f-4076-93ad-cd09d2b8c1d4-kube-api-access-kzkrb" (OuterVolumeSpecName: "kube-api-access-kzkrb") pod "c3df9720-470f-4076-93ad-cd09d2b8c1d4" (UID: "c3df9720-470f-4076-93ad-cd09d2b8c1d4"). InnerVolumeSpecName "kube-api-access-kzkrb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:25:28 crc kubenswrapper[4701]: I1121 19:25:28.234450 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3df9720-470f-4076-93ad-cd09d2b8c1d4-inventory" (OuterVolumeSpecName: "inventory") pod "c3df9720-470f-4076-93ad-cd09d2b8c1d4" (UID: "c3df9720-470f-4076-93ad-cd09d2b8c1d4"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:25:28 crc kubenswrapper[4701]: I1121 19:25:28.240819 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3df9720-470f-4076-93ad-cd09d2b8c1d4-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "c3df9720-470f-4076-93ad-cd09d2b8c1d4" (UID: "c3df9720-470f-4076-93ad-cd09d2b8c1d4"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:25:28 crc kubenswrapper[4701]: I1121 19:25:28.286377 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kzkrb\" (UniqueName: \"kubernetes.io/projected/c3df9720-470f-4076-93ad-cd09d2b8c1d4-kube-api-access-kzkrb\") on node \"crc\" DevicePath \"\"" Nov 21 19:25:28 crc kubenswrapper[4701]: I1121 19:25:28.286456 4701 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c3df9720-470f-4076-93ad-cd09d2b8c1d4-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 19:25:28 crc kubenswrapper[4701]: I1121 19:25:28.286475 4701 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c3df9720-470f-4076-93ad-cd09d2b8c1d4-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 19:25:28 crc kubenswrapper[4701]: I1121 19:25:28.714441 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-8vgkh" event={"ID":"c3df9720-470f-4076-93ad-cd09d2b8c1d4","Type":"ContainerDied","Data":"048c162d13d9689b33ff85ef4fd432f8c3526b13e65431c4d08ef8e3ecf8aac1"} Nov 21 19:25:28 crc kubenswrapper[4701]: I1121 19:25:28.714497 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="048c162d13d9689b33ff85ef4fd432f8c3526b13e65431c4d08ef8e3ecf8aac1" Nov 21 19:25:28 crc kubenswrapper[4701]: I1121 19:25:28.714526 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-8vgkh" Nov 21 19:25:28 crc kubenswrapper[4701]: I1121 19:25:28.831698 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-cbvnd"] Nov 21 19:25:28 crc kubenswrapper[4701]: E1121 19:25:28.832984 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3df9720-470f-4076-93ad-cd09d2b8c1d4" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 21 19:25:28 crc kubenswrapper[4701]: I1121 19:25:28.833238 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3df9720-470f-4076-93ad-cd09d2b8c1d4" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 21 19:25:28 crc kubenswrapper[4701]: I1121 19:25:28.833873 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3df9720-470f-4076-93ad-cd09d2b8c1d4" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 21 19:25:28 crc kubenswrapper[4701]: I1121 19:25:28.835883 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-cbvnd" Nov 21 19:25:28 crc kubenswrapper[4701]: I1121 19:25:28.840352 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hqsfp" Nov 21 19:25:28 crc kubenswrapper[4701]: I1121 19:25:28.841309 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 19:25:28 crc kubenswrapper[4701]: I1121 19:25:28.841415 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 19:25:28 crc kubenswrapper[4701]: I1121 19:25:28.842229 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-cbvnd"] Nov 21 19:25:28 crc kubenswrapper[4701]: I1121 19:25:28.844697 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 19:25:28 crc kubenswrapper[4701]: I1121 19:25:28.908981 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-cbvnd\" (UID: \"d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-cbvnd" Nov 21 19:25:28 crc kubenswrapper[4701]: I1121 19:25:28.909041 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-cbvnd\" (UID: \"d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-cbvnd" Nov 21 19:25:28 crc kubenswrapper[4701]: I1121 19:25:28.909139 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-cbvnd\" (UID: \"d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-cbvnd" Nov 21 19:25:28 crc kubenswrapper[4701]: I1121 19:25:28.909229 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5m29p\" (UniqueName: \"kubernetes.io/projected/d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2-kube-api-access-5m29p\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-cbvnd\" (UID: \"d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-cbvnd" Nov 21 19:25:29 crc kubenswrapper[4701]: I1121 19:25:29.011006 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-cbvnd\" (UID: \"d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-cbvnd" Nov 21 19:25:29 crc kubenswrapper[4701]: I1121 19:25:29.011058 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-cbvnd\" (UID: 
\"d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-cbvnd" Nov 21 19:25:29 crc kubenswrapper[4701]: I1121 19:25:29.011132 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-cbvnd\" (UID: \"d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-cbvnd" Nov 21 19:25:29 crc kubenswrapper[4701]: I1121 19:25:29.011178 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5m29p\" (UniqueName: \"kubernetes.io/projected/d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2-kube-api-access-5m29p\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-cbvnd\" (UID: \"d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-cbvnd" Nov 21 19:25:29 crc kubenswrapper[4701]: I1121 19:25:29.015672 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-cbvnd\" (UID: \"d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-cbvnd" Nov 21 19:25:29 crc kubenswrapper[4701]: I1121 19:25:29.016467 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-cbvnd\" (UID: \"d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-cbvnd" Nov 21 19:25:29 crc kubenswrapper[4701]: I1121 19:25:29.018275 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-cbvnd\" (UID: \"d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-cbvnd" Nov 21 19:25:29 crc kubenswrapper[4701]: I1121 19:25:29.031958 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5m29p\" (UniqueName: \"kubernetes.io/projected/d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2-kube-api-access-5m29p\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-cbvnd\" (UID: \"d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-cbvnd" Nov 21 19:25:29 crc kubenswrapper[4701]: I1121 19:25:29.166545 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-cbvnd" Nov 21 19:25:29 crc kubenswrapper[4701]: I1121 19:25:29.820122 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-cbvnd"] Nov 21 19:25:30 crc kubenswrapper[4701]: I1121 19:25:30.744107 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-cbvnd" event={"ID":"d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2","Type":"ContainerStarted","Data":"bbbeb7a906e3da13725a85552576e896aefcf6d45a06ac0d5002cb30702ad503"} Nov 21 19:25:30 crc kubenswrapper[4701]: I1121 19:25:30.745188 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-cbvnd" event={"ID":"d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2","Type":"ContainerStarted","Data":"c5c7cb8cc8b1f139bc8f9bb1cc4db4c70eb771c14eb64dd330a7c3d27806a6a1"} Nov 21 19:25:30 crc kubenswrapper[4701]: I1121 19:25:30.772417 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-cbvnd" podStartSLOduration=2.298888448 podStartE2EDuration="2.772379841s" podCreationTimestamp="2025-11-21 19:25:28 +0000 UTC" firstStartedPulling="2025-11-21 19:25:29.836556075 +0000 UTC m=+1420.621696102" lastFinishedPulling="2025-11-21 19:25:30.310047428 +0000 UTC m=+1421.095187495" observedRunningTime="2025-11-21 19:25:30.771674051 +0000 UTC m=+1421.556814118" watchObservedRunningTime="2025-11-21 19:25:30.772379841 +0000 UTC m=+1421.557519908" Nov 21 19:25:32 crc kubenswrapper[4701]: I1121 19:25:32.658136 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-kjphp" Nov 21 19:25:32 crc kubenswrapper[4701]: I1121 19:25:32.726467 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-kjphp" Nov 21 19:25:32 crc kubenswrapper[4701]: I1121 19:25:32.917961 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-kjphp"] Nov 21 19:25:33 crc kubenswrapper[4701]: I1121 19:25:33.808774 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-kjphp" podUID="a6d369cb-b7aa-4c5f-add0-b06054062693" containerName="registry-server" containerID="cri-o://5ccca81bba7298865fb11d2b127de7a4f45f5253dda6446c4ecffdee87347573" gracePeriod=2 Nov 21 19:25:34 crc kubenswrapper[4701]: I1121 19:25:34.354771 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-kjphp" Nov 21 19:25:34 crc kubenswrapper[4701]: I1121 19:25:34.461263 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-csgx9\" (UniqueName: \"kubernetes.io/projected/a6d369cb-b7aa-4c5f-add0-b06054062693-kube-api-access-csgx9\") pod \"a6d369cb-b7aa-4c5f-add0-b06054062693\" (UID: \"a6d369cb-b7aa-4c5f-add0-b06054062693\") " Nov 21 19:25:34 crc kubenswrapper[4701]: I1121 19:25:34.461551 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6d369cb-b7aa-4c5f-add0-b06054062693-catalog-content\") pod \"a6d369cb-b7aa-4c5f-add0-b06054062693\" (UID: \"a6d369cb-b7aa-4c5f-add0-b06054062693\") " Nov 21 19:25:34 crc kubenswrapper[4701]: I1121 19:25:34.461694 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6d369cb-b7aa-4c5f-add0-b06054062693-utilities\") pod \"a6d369cb-b7aa-4c5f-add0-b06054062693\" (UID: \"a6d369cb-b7aa-4c5f-add0-b06054062693\") " Nov 21 19:25:34 crc kubenswrapper[4701]: I1121 19:25:34.462517 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a6d369cb-b7aa-4c5f-add0-b06054062693-utilities" (OuterVolumeSpecName: "utilities") pod "a6d369cb-b7aa-4c5f-add0-b06054062693" (UID: "a6d369cb-b7aa-4c5f-add0-b06054062693"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:25:34 crc kubenswrapper[4701]: I1121 19:25:34.462956 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6d369cb-b7aa-4c5f-add0-b06054062693-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 19:25:34 crc kubenswrapper[4701]: I1121 19:25:34.474642 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6d369cb-b7aa-4c5f-add0-b06054062693-kube-api-access-csgx9" (OuterVolumeSpecName: "kube-api-access-csgx9") pod "a6d369cb-b7aa-4c5f-add0-b06054062693" (UID: "a6d369cb-b7aa-4c5f-add0-b06054062693"). InnerVolumeSpecName "kube-api-access-csgx9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:25:34 crc kubenswrapper[4701]: I1121 19:25:34.566651 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-csgx9\" (UniqueName: \"kubernetes.io/projected/a6d369cb-b7aa-4c5f-add0-b06054062693-kube-api-access-csgx9\") on node \"crc\" DevicePath \"\"" Nov 21 19:25:34 crc kubenswrapper[4701]: I1121 19:25:34.606719 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a6d369cb-b7aa-4c5f-add0-b06054062693-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a6d369cb-b7aa-4c5f-add0-b06054062693" (UID: "a6d369cb-b7aa-4c5f-add0-b06054062693"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:25:34 crc kubenswrapper[4701]: I1121 19:25:34.668528 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6d369cb-b7aa-4c5f-add0-b06054062693-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 19:25:34 crc kubenswrapper[4701]: I1121 19:25:34.831655 4701 generic.go:334] "Generic (PLEG): container finished" podID="a6d369cb-b7aa-4c5f-add0-b06054062693" containerID="5ccca81bba7298865fb11d2b127de7a4f45f5253dda6446c4ecffdee87347573" exitCode=0 Nov 21 19:25:34 crc kubenswrapper[4701]: I1121 19:25:34.831731 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kjphp" event={"ID":"a6d369cb-b7aa-4c5f-add0-b06054062693","Type":"ContainerDied","Data":"5ccca81bba7298865fb11d2b127de7a4f45f5253dda6446c4ecffdee87347573"} Nov 21 19:25:34 crc kubenswrapper[4701]: I1121 19:25:34.831779 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kjphp" event={"ID":"a6d369cb-b7aa-4c5f-add0-b06054062693","Type":"ContainerDied","Data":"b0fbb74d02f4088e33f445704301346d9b75b874d53629ab5610019223a7d175"} Nov 21 19:25:34 crc kubenswrapper[4701]: I1121 19:25:34.831772 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kjphp" Nov 21 19:25:34 crc kubenswrapper[4701]: I1121 19:25:34.831807 4701 scope.go:117] "RemoveContainer" containerID="5ccca81bba7298865fb11d2b127de7a4f45f5253dda6446c4ecffdee87347573" Nov 21 19:25:34 crc kubenswrapper[4701]: I1121 19:25:34.874298 4701 scope.go:117] "RemoveContainer" containerID="452479aa379882d5d504b0b849341f54985d24bf72346ce7bcc1eff2e9d347a3" Nov 21 19:25:34 crc kubenswrapper[4701]: I1121 19:25:34.884386 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-kjphp"] Nov 21 19:25:34 crc kubenswrapper[4701]: I1121 19:25:34.894011 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-kjphp"] Nov 21 19:25:34 crc kubenswrapper[4701]: I1121 19:25:34.919399 4701 scope.go:117] "RemoveContainer" containerID="ac30f58fa4b5a9ee730432faa673b2ef98528e10601ca52cac9529dc98c42390" Nov 21 19:25:34 crc kubenswrapper[4701]: I1121 19:25:34.976092 4701 scope.go:117] "RemoveContainer" containerID="5ccca81bba7298865fb11d2b127de7a4f45f5253dda6446c4ecffdee87347573" Nov 21 19:25:34 crc kubenswrapper[4701]: E1121 19:25:34.976713 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5ccca81bba7298865fb11d2b127de7a4f45f5253dda6446c4ecffdee87347573\": container with ID starting with 5ccca81bba7298865fb11d2b127de7a4f45f5253dda6446c4ecffdee87347573 not found: ID does not exist" containerID="5ccca81bba7298865fb11d2b127de7a4f45f5253dda6446c4ecffdee87347573" Nov 21 19:25:34 crc kubenswrapper[4701]: I1121 19:25:34.976775 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ccca81bba7298865fb11d2b127de7a4f45f5253dda6446c4ecffdee87347573"} err="failed to get container status \"5ccca81bba7298865fb11d2b127de7a4f45f5253dda6446c4ecffdee87347573\": rpc error: code = NotFound desc = could not find container \"5ccca81bba7298865fb11d2b127de7a4f45f5253dda6446c4ecffdee87347573\": container with ID starting with 5ccca81bba7298865fb11d2b127de7a4f45f5253dda6446c4ecffdee87347573 not found: ID does not exist" Nov 21 19:25:34 crc 
kubenswrapper[4701]: I1121 19:25:34.976819 4701 scope.go:117] "RemoveContainer" containerID="452479aa379882d5d504b0b849341f54985d24bf72346ce7bcc1eff2e9d347a3" Nov 21 19:25:34 crc kubenswrapper[4701]: E1121 19:25:34.977401 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"452479aa379882d5d504b0b849341f54985d24bf72346ce7bcc1eff2e9d347a3\": container with ID starting with 452479aa379882d5d504b0b849341f54985d24bf72346ce7bcc1eff2e9d347a3 not found: ID does not exist" containerID="452479aa379882d5d504b0b849341f54985d24bf72346ce7bcc1eff2e9d347a3" Nov 21 19:25:34 crc kubenswrapper[4701]: I1121 19:25:34.977445 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"452479aa379882d5d504b0b849341f54985d24bf72346ce7bcc1eff2e9d347a3"} err="failed to get container status \"452479aa379882d5d504b0b849341f54985d24bf72346ce7bcc1eff2e9d347a3\": rpc error: code = NotFound desc = could not find container \"452479aa379882d5d504b0b849341f54985d24bf72346ce7bcc1eff2e9d347a3\": container with ID starting with 452479aa379882d5d504b0b849341f54985d24bf72346ce7bcc1eff2e9d347a3 not found: ID does not exist" Nov 21 19:25:34 crc kubenswrapper[4701]: I1121 19:25:34.977474 4701 scope.go:117] "RemoveContainer" containerID="ac30f58fa4b5a9ee730432faa673b2ef98528e10601ca52cac9529dc98c42390" Nov 21 19:25:34 crc kubenswrapper[4701]: E1121 19:25:34.977870 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac30f58fa4b5a9ee730432faa673b2ef98528e10601ca52cac9529dc98c42390\": container with ID starting with ac30f58fa4b5a9ee730432faa673b2ef98528e10601ca52cac9529dc98c42390 not found: ID does not exist" containerID="ac30f58fa4b5a9ee730432faa673b2ef98528e10601ca52cac9529dc98c42390" Nov 21 19:25:34 crc kubenswrapper[4701]: I1121 19:25:34.977928 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac30f58fa4b5a9ee730432faa673b2ef98528e10601ca52cac9529dc98c42390"} err="failed to get container status \"ac30f58fa4b5a9ee730432faa673b2ef98528e10601ca52cac9529dc98c42390\": rpc error: code = NotFound desc = could not find container \"ac30f58fa4b5a9ee730432faa673b2ef98528e10601ca52cac9529dc98c42390\": container with ID starting with ac30f58fa4b5a9ee730432faa673b2ef98528e10601ca52cac9529dc98c42390 not found: ID does not exist" Nov 21 19:25:35 crc kubenswrapper[4701]: I1121 19:25:35.970670 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a6d369cb-b7aa-4c5f-add0-b06054062693" path="/var/lib/kubelet/pods/a6d369cb-b7aa-4c5f-add0-b06054062693/volumes" Nov 21 19:26:07 crc kubenswrapper[4701]: I1121 19:26:07.178021 4701 scope.go:117] "RemoveContainer" containerID="4bd04379b743585bea0354cfac0637eb57de9c98620b1951ec6024eb46b07e09" Nov 21 19:26:07 crc kubenswrapper[4701]: I1121 19:26:07.232622 4701 scope.go:117] "RemoveContainer" containerID="bec4522f6cfd9c8b102a94c7b53c32fdf41063ea44f1a6d97edd1f49d43218f1" Nov 21 19:26:07 crc kubenswrapper[4701]: I1121 19:26:07.426463 4701 scope.go:117] "RemoveContainer" containerID="430f8778df60ba3d059bc9aa9fa12d81c20d41994db5d5fd007530b6d67dbe5f" Nov 21 19:26:48 crc kubenswrapper[4701]: I1121 19:26:48.614120 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: 
connect: connection refused" start-of-body= Nov 21 19:26:48 crc kubenswrapper[4701]: I1121 19:26:48.615058 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 19:27:18 crc kubenswrapper[4701]: I1121 19:27:18.613812 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 19:27:18 crc kubenswrapper[4701]: I1121 19:27:18.614601 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 19:27:48 crc kubenswrapper[4701]: I1121 19:27:48.613592 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 19:27:48 crc kubenswrapper[4701]: I1121 19:27:48.614489 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 19:27:48 crc kubenswrapper[4701]: I1121 19:27:48.614565 4701 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" Nov 21 19:27:48 crc kubenswrapper[4701]: I1121 19:27:48.615723 4701 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"081447c4e1559ffc58e33e136a78fa7b343ef9791f855a491b27a72a49d8cde6"} pod="openshift-machine-config-operator/machine-config-daemon-tbszf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 19:27:48 crc kubenswrapper[4701]: I1121 19:27:48.615782 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" containerID="cri-o://081447c4e1559ffc58e33e136a78fa7b343ef9791f855a491b27a72a49d8cde6" gracePeriod=600 Nov 21 19:27:48 crc kubenswrapper[4701]: E1121 19:27:48.744159 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:27:48 crc kubenswrapper[4701]: I1121 19:27:48.827515 4701 generic.go:334] "Generic 
(PLEG): container finished" podID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerID="081447c4e1559ffc58e33e136a78fa7b343ef9791f855a491b27a72a49d8cde6" exitCode=0 Nov 21 19:27:48 crc kubenswrapper[4701]: I1121 19:27:48.827606 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerDied","Data":"081447c4e1559ffc58e33e136a78fa7b343ef9791f855a491b27a72a49d8cde6"} Nov 21 19:27:48 crc kubenswrapper[4701]: I1121 19:27:48.827717 4701 scope.go:117] "RemoveContainer" containerID="d6fe7caeaff234a352e7f7e2aad2e24b43f59b2b97fc616eef788494436369d1" Nov 21 19:27:48 crc kubenswrapper[4701]: I1121 19:27:48.828866 4701 scope.go:117] "RemoveContainer" containerID="081447c4e1559ffc58e33e136a78fa7b343ef9791f855a491b27a72a49d8cde6" Nov 21 19:27:48 crc kubenswrapper[4701]: E1121 19:27:48.829780 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:27:59 crc kubenswrapper[4701]: I1121 19:27:59.964450 4701 scope.go:117] "RemoveContainer" containerID="081447c4e1559ffc58e33e136a78fa7b343ef9791f855a491b27a72a49d8cde6" Nov 21 19:27:59 crc kubenswrapper[4701]: E1121 19:27:59.965675 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:28:11 crc kubenswrapper[4701]: I1121 19:28:11.956877 4701 scope.go:117] "RemoveContainer" containerID="081447c4e1559ffc58e33e136a78fa7b343ef9791f855a491b27a72a49d8cde6" Nov 21 19:28:11 crc kubenswrapper[4701]: E1121 19:28:11.957930 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:28:13 crc kubenswrapper[4701]: I1121 19:28:13.766806 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-xc9b9"] Nov 21 19:28:13 crc kubenswrapper[4701]: E1121 19:28:13.768151 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6d369cb-b7aa-4c5f-add0-b06054062693" containerName="extract-utilities" Nov 21 19:28:13 crc kubenswrapper[4701]: I1121 19:28:13.768172 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6d369cb-b7aa-4c5f-add0-b06054062693" containerName="extract-utilities" Nov 21 19:28:13 crc kubenswrapper[4701]: E1121 19:28:13.768227 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6d369cb-b7aa-4c5f-add0-b06054062693" containerName="registry-server" Nov 21 19:28:13 crc kubenswrapper[4701]: I1121 19:28:13.768237 4701 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="a6d369cb-b7aa-4c5f-add0-b06054062693" containerName="registry-server" Nov 21 19:28:13 crc kubenswrapper[4701]: E1121 19:28:13.768273 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6d369cb-b7aa-4c5f-add0-b06054062693" containerName="extract-content" Nov 21 19:28:13 crc kubenswrapper[4701]: I1121 19:28:13.768281 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6d369cb-b7aa-4c5f-add0-b06054062693" containerName="extract-content" Nov 21 19:28:13 crc kubenswrapper[4701]: I1121 19:28:13.768545 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6d369cb-b7aa-4c5f-add0-b06054062693" containerName="registry-server" Nov 21 19:28:13 crc kubenswrapper[4701]: I1121 19:28:13.771035 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xc9b9" Nov 21 19:28:13 crc kubenswrapper[4701]: I1121 19:28:13.787089 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xc9b9"] Nov 21 19:28:13 crc kubenswrapper[4701]: I1121 19:28:13.884623 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xlwjs\" (UniqueName: \"kubernetes.io/projected/8a8ccc76-9a49-4840-99ce-d87c21a3d0a2-kube-api-access-xlwjs\") pod \"redhat-marketplace-xc9b9\" (UID: \"8a8ccc76-9a49-4840-99ce-d87c21a3d0a2\") " pod="openshift-marketplace/redhat-marketplace-xc9b9" Nov 21 19:28:13 crc kubenswrapper[4701]: I1121 19:28:13.884861 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a8ccc76-9a49-4840-99ce-d87c21a3d0a2-utilities\") pod \"redhat-marketplace-xc9b9\" (UID: \"8a8ccc76-9a49-4840-99ce-d87c21a3d0a2\") " pod="openshift-marketplace/redhat-marketplace-xc9b9" Nov 21 19:28:13 crc kubenswrapper[4701]: I1121 19:28:13.884904 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a8ccc76-9a49-4840-99ce-d87c21a3d0a2-catalog-content\") pod \"redhat-marketplace-xc9b9\" (UID: \"8a8ccc76-9a49-4840-99ce-d87c21a3d0a2\") " pod="openshift-marketplace/redhat-marketplace-xc9b9" Nov 21 19:28:13 crc kubenswrapper[4701]: I1121 19:28:13.988111 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a8ccc76-9a49-4840-99ce-d87c21a3d0a2-utilities\") pod \"redhat-marketplace-xc9b9\" (UID: \"8a8ccc76-9a49-4840-99ce-d87c21a3d0a2\") " pod="openshift-marketplace/redhat-marketplace-xc9b9" Nov 21 19:28:13 crc kubenswrapper[4701]: I1121 19:28:13.988165 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a8ccc76-9a49-4840-99ce-d87c21a3d0a2-catalog-content\") pod \"redhat-marketplace-xc9b9\" (UID: \"8a8ccc76-9a49-4840-99ce-d87c21a3d0a2\") " pod="openshift-marketplace/redhat-marketplace-xc9b9" Nov 21 19:28:13 crc kubenswrapper[4701]: I1121 19:28:13.988343 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xlwjs\" (UniqueName: \"kubernetes.io/projected/8a8ccc76-9a49-4840-99ce-d87c21a3d0a2-kube-api-access-xlwjs\") pod \"redhat-marketplace-xc9b9\" (UID: \"8a8ccc76-9a49-4840-99ce-d87c21a3d0a2\") " pod="openshift-marketplace/redhat-marketplace-xc9b9" Nov 21 19:28:13 crc kubenswrapper[4701]: I1121 
19:28:13.988764 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a8ccc76-9a49-4840-99ce-d87c21a3d0a2-utilities\") pod \"redhat-marketplace-xc9b9\" (UID: \"8a8ccc76-9a49-4840-99ce-d87c21a3d0a2\") " pod="openshift-marketplace/redhat-marketplace-xc9b9" Nov 21 19:28:13 crc kubenswrapper[4701]: I1121 19:28:13.989242 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a8ccc76-9a49-4840-99ce-d87c21a3d0a2-catalog-content\") pod \"redhat-marketplace-xc9b9\" (UID: \"8a8ccc76-9a49-4840-99ce-d87c21a3d0a2\") " pod="openshift-marketplace/redhat-marketplace-xc9b9" Nov 21 19:28:14 crc kubenswrapper[4701]: I1121 19:28:14.015666 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xlwjs\" (UniqueName: \"kubernetes.io/projected/8a8ccc76-9a49-4840-99ce-d87c21a3d0a2-kube-api-access-xlwjs\") pod \"redhat-marketplace-xc9b9\" (UID: \"8a8ccc76-9a49-4840-99ce-d87c21a3d0a2\") " pod="openshift-marketplace/redhat-marketplace-xc9b9" Nov 21 19:28:14 crc kubenswrapper[4701]: I1121 19:28:14.149505 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xc9b9" Nov 21 19:28:14 crc kubenswrapper[4701]: I1121 19:28:14.688681 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xc9b9"] Nov 21 19:28:14 crc kubenswrapper[4701]: W1121 19:28:14.701721 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8a8ccc76_9a49_4840_99ce_d87c21a3d0a2.slice/crio-f9de39cae1aa925975baa62d5992b15e9b5b9d4da20cdb238a01b8411b17171b WatchSource:0}: Error finding container f9de39cae1aa925975baa62d5992b15e9b5b9d4da20cdb238a01b8411b17171b: Status 404 returned error can't find the container with id f9de39cae1aa925975baa62d5992b15e9b5b9d4da20cdb238a01b8411b17171b Nov 21 19:28:15 crc kubenswrapper[4701]: I1121 19:28:15.207099 4701 generic.go:334] "Generic (PLEG): container finished" podID="8a8ccc76-9a49-4840-99ce-d87c21a3d0a2" containerID="0da72f3b033e7cccec41d992b161e5a7ca63937de9da5f52094198f5fa293d9b" exitCode=0 Nov 21 19:28:15 crc kubenswrapper[4701]: I1121 19:28:15.207508 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xc9b9" event={"ID":"8a8ccc76-9a49-4840-99ce-d87c21a3d0a2","Type":"ContainerDied","Data":"0da72f3b033e7cccec41d992b161e5a7ca63937de9da5f52094198f5fa293d9b"} Nov 21 19:28:15 crc kubenswrapper[4701]: I1121 19:28:15.207581 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xc9b9" event={"ID":"8a8ccc76-9a49-4840-99ce-d87c21a3d0a2","Type":"ContainerStarted","Data":"f9de39cae1aa925975baa62d5992b15e9b5b9d4da20cdb238a01b8411b17171b"} Nov 21 19:28:15 crc kubenswrapper[4701]: I1121 19:28:15.210479 4701 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 19:28:16 crc kubenswrapper[4701]: I1121 19:28:16.170528 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-fc87q"] Nov 21 19:28:16 crc kubenswrapper[4701]: I1121 19:28:16.174036 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-fc87q" Nov 21 19:28:16 crc kubenswrapper[4701]: I1121 19:28:16.184248 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fc87q"] Nov 21 19:28:16 crc kubenswrapper[4701]: I1121 19:28:16.243367 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9961b99c-11d1-450a-9125-f48bee433d17-catalog-content\") pod \"community-operators-fc87q\" (UID: \"9961b99c-11d1-450a-9125-f48bee433d17\") " pod="openshift-marketplace/community-operators-fc87q" Nov 21 19:28:16 crc kubenswrapper[4701]: I1121 19:28:16.243470 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9961b99c-11d1-450a-9125-f48bee433d17-utilities\") pod \"community-operators-fc87q\" (UID: \"9961b99c-11d1-450a-9125-f48bee433d17\") " pod="openshift-marketplace/community-operators-fc87q" Nov 21 19:28:16 crc kubenswrapper[4701]: I1121 19:28:16.243584 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nhldz\" (UniqueName: \"kubernetes.io/projected/9961b99c-11d1-450a-9125-f48bee433d17-kube-api-access-nhldz\") pod \"community-operators-fc87q\" (UID: \"9961b99c-11d1-450a-9125-f48bee433d17\") " pod="openshift-marketplace/community-operators-fc87q" Nov 21 19:28:16 crc kubenswrapper[4701]: I1121 19:28:16.348894 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nhldz\" (UniqueName: \"kubernetes.io/projected/9961b99c-11d1-450a-9125-f48bee433d17-kube-api-access-nhldz\") pod \"community-operators-fc87q\" (UID: \"9961b99c-11d1-450a-9125-f48bee433d17\") " pod="openshift-marketplace/community-operators-fc87q" Nov 21 19:28:16 crc kubenswrapper[4701]: I1121 19:28:16.349111 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9961b99c-11d1-450a-9125-f48bee433d17-catalog-content\") pod \"community-operators-fc87q\" (UID: \"9961b99c-11d1-450a-9125-f48bee433d17\") " pod="openshift-marketplace/community-operators-fc87q" Nov 21 19:28:16 crc kubenswrapper[4701]: I1121 19:28:16.349211 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9961b99c-11d1-450a-9125-f48bee433d17-utilities\") pod \"community-operators-fc87q\" (UID: \"9961b99c-11d1-450a-9125-f48bee433d17\") " pod="openshift-marketplace/community-operators-fc87q" Nov 21 19:28:16 crc kubenswrapper[4701]: I1121 19:28:16.350342 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9961b99c-11d1-450a-9125-f48bee433d17-utilities\") pod \"community-operators-fc87q\" (UID: \"9961b99c-11d1-450a-9125-f48bee433d17\") " pod="openshift-marketplace/community-operators-fc87q" Nov 21 19:28:16 crc kubenswrapper[4701]: I1121 19:28:16.350634 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9961b99c-11d1-450a-9125-f48bee433d17-catalog-content\") pod \"community-operators-fc87q\" (UID: \"9961b99c-11d1-450a-9125-f48bee433d17\") " pod="openshift-marketplace/community-operators-fc87q" Nov 21 19:28:16 crc kubenswrapper[4701]: I1121 19:28:16.376902 4701 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-nhldz\" (UniqueName: \"kubernetes.io/projected/9961b99c-11d1-450a-9125-f48bee433d17-kube-api-access-nhldz\") pod \"community-operators-fc87q\" (UID: \"9961b99c-11d1-450a-9125-f48bee433d17\") " pod="openshift-marketplace/community-operators-fc87q" Nov 21 19:28:16 crc kubenswrapper[4701]: I1121 19:28:16.552316 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fc87q" Nov 21 19:28:17 crc kubenswrapper[4701]: W1121 19:28:17.073572 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9961b99c_11d1_450a_9125_f48bee433d17.slice/crio-1613c179a145510d02742c522801f7c5888a2edd71151c98b3f962dca4a3cec1 WatchSource:0}: Error finding container 1613c179a145510d02742c522801f7c5888a2edd71151c98b3f962dca4a3cec1: Status 404 returned error can't find the container with id 1613c179a145510d02742c522801f7c5888a2edd71151c98b3f962dca4a3cec1 Nov 21 19:28:17 crc kubenswrapper[4701]: I1121 19:28:17.075417 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fc87q"] Nov 21 19:28:17 crc kubenswrapper[4701]: I1121 19:28:17.279412 4701 generic.go:334] "Generic (PLEG): container finished" podID="8a8ccc76-9a49-4840-99ce-d87c21a3d0a2" containerID="d553f398d6e72952d0ad5227a5092593de1d654683e975c9b0af35209794a9a7" exitCode=0 Nov 21 19:28:17 crc kubenswrapper[4701]: I1121 19:28:17.279477 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xc9b9" event={"ID":"8a8ccc76-9a49-4840-99ce-d87c21a3d0a2","Type":"ContainerDied","Data":"d553f398d6e72952d0ad5227a5092593de1d654683e975c9b0af35209794a9a7"} Nov 21 19:28:17 crc kubenswrapper[4701]: I1121 19:28:17.285833 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fc87q" event={"ID":"9961b99c-11d1-450a-9125-f48bee433d17","Type":"ContainerStarted","Data":"1613c179a145510d02742c522801f7c5888a2edd71151c98b3f962dca4a3cec1"} Nov 21 19:28:18 crc kubenswrapper[4701]: I1121 19:28:18.300856 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xc9b9" event={"ID":"8a8ccc76-9a49-4840-99ce-d87c21a3d0a2","Type":"ContainerStarted","Data":"79e62b2870db690dfef19fd635e876067f1f16f78eac46074ea98083eb14ffd9"} Nov 21 19:28:18 crc kubenswrapper[4701]: I1121 19:28:18.303340 4701 generic.go:334] "Generic (PLEG): container finished" podID="9961b99c-11d1-450a-9125-f48bee433d17" containerID="34856b18cd2c11fbb2e272689ef6f90fd20b6a56533da1ff33e082b6a0cbed10" exitCode=0 Nov 21 19:28:18 crc kubenswrapper[4701]: I1121 19:28:18.303386 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fc87q" event={"ID":"9961b99c-11d1-450a-9125-f48bee433d17","Type":"ContainerDied","Data":"34856b18cd2c11fbb2e272689ef6f90fd20b6a56533da1ff33e082b6a0cbed10"} Nov 21 19:28:18 crc kubenswrapper[4701]: I1121 19:28:18.334533 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-xc9b9" podStartSLOduration=2.847869367 podStartE2EDuration="5.334508391s" podCreationTimestamp="2025-11-21 19:28:13 +0000 UTC" firstStartedPulling="2025-11-21 19:28:15.210052999 +0000 UTC m=+1585.995193026" lastFinishedPulling="2025-11-21 19:28:17.696691983 +0000 UTC m=+1588.481832050" observedRunningTime="2025-11-21 19:28:18.324670766 +0000 UTC 
m=+1589.109810793" watchObservedRunningTime="2025-11-21 19:28:18.334508391 +0000 UTC m=+1589.119648418" Nov 21 19:28:19 crc kubenswrapper[4701]: I1121 19:28:19.320465 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fc87q" event={"ID":"9961b99c-11d1-450a-9125-f48bee433d17","Type":"ContainerStarted","Data":"5168a5d3c56e426cf9cddc505217c6fecee255d67a1436279f578e0b3ce920bf"} Nov 21 19:28:20 crc kubenswrapper[4701]: I1121 19:28:20.342600 4701 generic.go:334] "Generic (PLEG): container finished" podID="9961b99c-11d1-450a-9125-f48bee433d17" containerID="5168a5d3c56e426cf9cddc505217c6fecee255d67a1436279f578e0b3ce920bf" exitCode=0 Nov 21 19:28:20 crc kubenswrapper[4701]: I1121 19:28:20.342676 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fc87q" event={"ID":"9961b99c-11d1-450a-9125-f48bee433d17","Type":"ContainerDied","Data":"5168a5d3c56e426cf9cddc505217c6fecee255d67a1436279f578e0b3ce920bf"} Nov 21 19:28:22 crc kubenswrapper[4701]: I1121 19:28:22.370776 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fc87q" event={"ID":"9961b99c-11d1-450a-9125-f48bee433d17","Type":"ContainerStarted","Data":"365fd0a05c17c63c3561032e1dba2bab6f3476683e804668f55ddcda27fc38ae"} Nov 21 19:28:24 crc kubenswrapper[4701]: I1121 19:28:24.150421 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-xc9b9" Nov 21 19:28:24 crc kubenswrapper[4701]: I1121 19:28:24.151343 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-xc9b9" Nov 21 19:28:24 crc kubenswrapper[4701]: I1121 19:28:24.224569 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-xc9b9" Nov 21 19:28:24 crc kubenswrapper[4701]: I1121 19:28:24.252222 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-fc87q" podStartSLOduration=5.449791625 podStartE2EDuration="8.252180564s" podCreationTimestamp="2025-11-21 19:28:16 +0000 UTC" firstStartedPulling="2025-11-21 19:28:18.3058804 +0000 UTC m=+1589.091020437" lastFinishedPulling="2025-11-21 19:28:21.108269339 +0000 UTC m=+1591.893409376" observedRunningTime="2025-11-21 19:28:22.400781519 +0000 UTC m=+1593.185921586" watchObservedRunningTime="2025-11-21 19:28:24.252180564 +0000 UTC m=+1595.037320581" Nov 21 19:28:24 crc kubenswrapper[4701]: I1121 19:28:24.487027 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-xc9b9" Nov 21 19:28:24 crc kubenswrapper[4701]: I1121 19:28:24.953368 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xc9b9"] Nov 21 19:28:26 crc kubenswrapper[4701]: I1121 19:28:26.443326 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-xc9b9" podUID="8a8ccc76-9a49-4840-99ce-d87c21a3d0a2" containerName="registry-server" containerID="cri-o://79e62b2870db690dfef19fd635e876067f1f16f78eac46074ea98083eb14ffd9" gracePeriod=2 Nov 21 19:28:26 crc kubenswrapper[4701]: I1121 19:28:26.553239 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-fc87q" Nov 21 19:28:26 crc kubenswrapper[4701]: I1121 19:28:26.553313 4701 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-fc87q" Nov 21 19:28:26 crc kubenswrapper[4701]: I1121 19:28:26.616599 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-fc87q" Nov 21 19:28:26 crc kubenswrapper[4701]: I1121 19:28:26.950590 4701 scope.go:117] "RemoveContainer" containerID="081447c4e1559ffc58e33e136a78fa7b343ef9791f855a491b27a72a49d8cde6" Nov 21 19:28:26 crc kubenswrapper[4701]: E1121 19:28:26.951149 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:28:27 crc kubenswrapper[4701]: I1121 19:28:27.466881 4701 generic.go:334] "Generic (PLEG): container finished" podID="8a8ccc76-9a49-4840-99ce-d87c21a3d0a2" containerID="79e62b2870db690dfef19fd635e876067f1f16f78eac46074ea98083eb14ffd9" exitCode=0 Nov 21 19:28:27 crc kubenswrapper[4701]: I1121 19:28:27.466976 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xc9b9" event={"ID":"8a8ccc76-9a49-4840-99ce-d87c21a3d0a2","Type":"ContainerDied","Data":"79e62b2870db690dfef19fd635e876067f1f16f78eac46074ea98083eb14ffd9"} Nov 21 19:28:27 crc kubenswrapper[4701]: I1121 19:28:27.553588 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-fc87q" Nov 21 19:28:28 crc kubenswrapper[4701]: I1121 19:28:28.117721 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xc9b9" Nov 21 19:28:28 crc kubenswrapper[4701]: I1121 19:28:28.267429 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xlwjs\" (UniqueName: \"kubernetes.io/projected/8a8ccc76-9a49-4840-99ce-d87c21a3d0a2-kube-api-access-xlwjs\") pod \"8a8ccc76-9a49-4840-99ce-d87c21a3d0a2\" (UID: \"8a8ccc76-9a49-4840-99ce-d87c21a3d0a2\") " Nov 21 19:28:28 crc kubenswrapper[4701]: I1121 19:28:28.267686 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a8ccc76-9a49-4840-99ce-d87c21a3d0a2-catalog-content\") pod \"8a8ccc76-9a49-4840-99ce-d87c21a3d0a2\" (UID: \"8a8ccc76-9a49-4840-99ce-d87c21a3d0a2\") " Nov 21 19:28:28 crc kubenswrapper[4701]: I1121 19:28:28.267793 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a8ccc76-9a49-4840-99ce-d87c21a3d0a2-utilities\") pod \"8a8ccc76-9a49-4840-99ce-d87c21a3d0a2\" (UID: \"8a8ccc76-9a49-4840-99ce-d87c21a3d0a2\") " Nov 21 19:28:28 crc kubenswrapper[4701]: I1121 19:28:28.269075 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a8ccc76-9a49-4840-99ce-d87c21a3d0a2-utilities" (OuterVolumeSpecName: "utilities") pod "8a8ccc76-9a49-4840-99ce-d87c21a3d0a2" (UID: "8a8ccc76-9a49-4840-99ce-d87c21a3d0a2"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:28:28 crc kubenswrapper[4701]: I1121 19:28:28.276414 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a8ccc76-9a49-4840-99ce-d87c21a3d0a2-kube-api-access-xlwjs" (OuterVolumeSpecName: "kube-api-access-xlwjs") pod "8a8ccc76-9a49-4840-99ce-d87c21a3d0a2" (UID: "8a8ccc76-9a49-4840-99ce-d87c21a3d0a2"). InnerVolumeSpecName "kube-api-access-xlwjs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:28:28 crc kubenswrapper[4701]: I1121 19:28:28.301539 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a8ccc76-9a49-4840-99ce-d87c21a3d0a2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8a8ccc76-9a49-4840-99ce-d87c21a3d0a2" (UID: "8a8ccc76-9a49-4840-99ce-d87c21a3d0a2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:28:28 crc kubenswrapper[4701]: I1121 19:28:28.346354 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fc87q"] Nov 21 19:28:28 crc kubenswrapper[4701]: I1121 19:28:28.370482 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a8ccc76-9a49-4840-99ce-d87c21a3d0a2-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 19:28:28 crc kubenswrapper[4701]: I1121 19:28:28.370521 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xlwjs\" (UniqueName: \"kubernetes.io/projected/8a8ccc76-9a49-4840-99ce-d87c21a3d0a2-kube-api-access-xlwjs\") on node \"crc\" DevicePath \"\"" Nov 21 19:28:28 crc kubenswrapper[4701]: I1121 19:28:28.370532 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a8ccc76-9a49-4840-99ce-d87c21a3d0a2-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 19:28:28 crc kubenswrapper[4701]: I1121 19:28:28.487843 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xc9b9" event={"ID":"8a8ccc76-9a49-4840-99ce-d87c21a3d0a2","Type":"ContainerDied","Data":"f9de39cae1aa925975baa62d5992b15e9b5b9d4da20cdb238a01b8411b17171b"} Nov 21 19:28:28 crc kubenswrapper[4701]: I1121 19:28:28.487887 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xc9b9" Nov 21 19:28:28 crc kubenswrapper[4701]: I1121 19:28:28.487952 4701 scope.go:117] "RemoveContainer" containerID="79e62b2870db690dfef19fd635e876067f1f16f78eac46074ea98083eb14ffd9" Nov 21 19:28:28 crc kubenswrapper[4701]: I1121 19:28:28.528567 4701 scope.go:117] "RemoveContainer" containerID="d553f398d6e72952d0ad5227a5092593de1d654683e975c9b0af35209794a9a7" Nov 21 19:28:28 crc kubenswrapper[4701]: I1121 19:28:28.540064 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xc9b9"] Nov 21 19:28:28 crc kubenswrapper[4701]: I1121 19:28:28.557060 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-xc9b9"] Nov 21 19:28:28 crc kubenswrapper[4701]: I1121 19:28:28.576498 4701 scope.go:117] "RemoveContainer" containerID="0da72f3b033e7cccec41d992b161e5a7ca63937de9da5f52094198f5fa293d9b" Nov 21 19:28:29 crc kubenswrapper[4701]: I1121 19:28:29.101179 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-h5fzs"] Nov 21 19:28:29 crc kubenswrapper[4701]: I1121 19:28:29.120407 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-d97b-account-create-cxhsn"] Nov 21 19:28:29 crc kubenswrapper[4701]: I1121 19:28:29.130220 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-3a61-account-create-77s5z"] Nov 21 19:28:29 crc kubenswrapper[4701]: I1121 19:28:29.140519 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-db-create-f7jkr"] Nov 21 19:28:29 crc kubenswrapper[4701]: I1121 19:28:29.149768 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-t695v"] Nov 21 19:28:29 crc kubenswrapper[4701]: I1121 19:28:29.158654 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-65e8-account-create-xh7st"] Nov 21 19:28:29 crc kubenswrapper[4701]: I1121 19:28:29.167457 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-h5fzs"] Nov 21 19:28:29 crc kubenswrapper[4701]: I1121 19:28:29.175358 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-d97b-account-create-cxhsn"] Nov 21 19:28:29 crc kubenswrapper[4701]: I1121 19:28:29.183918 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-db-create-f7jkr"] Nov 21 19:28:29 crc kubenswrapper[4701]: I1121 19:28:29.191446 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-65e8-account-create-xh7st"] Nov 21 19:28:29 crc kubenswrapper[4701]: I1121 19:28:29.203371 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-3a61-account-create-77s5z"] Nov 21 19:28:29 crc kubenswrapper[4701]: I1121 19:28:29.211755 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-t695v"] Nov 21 19:28:29 crc kubenswrapper[4701]: I1121 19:28:29.502539 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-fc87q" podUID="9961b99c-11d1-450a-9125-f48bee433d17" containerName="registry-server" containerID="cri-o://365fd0a05c17c63c3561032e1dba2bab6f3476683e804668f55ddcda27fc38ae" gracePeriod=2 Nov 21 19:28:29 crc kubenswrapper[4701]: I1121 19:28:29.964088 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1708d089-4719-4de2-af63-748de46758d4" 
path="/var/lib/kubelet/pods/1708d089-4719-4de2-af63-748de46758d4/volumes" Nov 21 19:28:29 crc kubenswrapper[4701]: I1121 19:28:29.964755 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e5bfc4d-be86-4362-bd2f-9dc613af744a" path="/var/lib/kubelet/pods/2e5bfc4d-be86-4362-bd2f-9dc613af744a/volumes" Nov 21 19:28:29 crc kubenswrapper[4701]: I1121 19:28:29.965345 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="372e7b57-848c-4342-a448-3b3cb9b80aa4" path="/var/lib/kubelet/pods/372e7b57-848c-4342-a448-3b3cb9b80aa4/volumes" Nov 21 19:28:29 crc kubenswrapper[4701]: I1121 19:28:29.965866 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a8ccc76-9a49-4840-99ce-d87c21a3d0a2" path="/var/lib/kubelet/pods/8a8ccc76-9a49-4840-99ce-d87c21a3d0a2/volumes" Nov 21 19:28:29 crc kubenswrapper[4701]: I1121 19:28:29.967331 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae19d0b9-054a-479e-8ed5-cff6df83a7ba" path="/var/lib/kubelet/pods/ae19d0b9-054a-479e-8ed5-cff6df83a7ba/volumes" Nov 21 19:28:29 crc kubenswrapper[4701]: I1121 19:28:29.967875 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b57c7d3c-d5f8-45d9-bf1b-51ec05afd543" path="/var/lib/kubelet/pods/b57c7d3c-d5f8-45d9-bf1b-51ec05afd543/volumes" Nov 21 19:28:29 crc kubenswrapper[4701]: I1121 19:28:29.968507 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e1d4e8dd-f9a5-4fd6-b533-c84cd7087a59" path="/var/lib/kubelet/pods/e1d4e8dd-f9a5-4fd6-b533-c84cd7087a59/volumes" Nov 21 19:28:30 crc kubenswrapper[4701]: I1121 19:28:30.096327 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fc87q" Nov 21 19:28:30 crc kubenswrapper[4701]: I1121 19:28:30.218066 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nhldz\" (UniqueName: \"kubernetes.io/projected/9961b99c-11d1-450a-9125-f48bee433d17-kube-api-access-nhldz\") pod \"9961b99c-11d1-450a-9125-f48bee433d17\" (UID: \"9961b99c-11d1-450a-9125-f48bee433d17\") " Nov 21 19:28:30 crc kubenswrapper[4701]: I1121 19:28:30.218163 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9961b99c-11d1-450a-9125-f48bee433d17-catalog-content\") pod \"9961b99c-11d1-450a-9125-f48bee433d17\" (UID: \"9961b99c-11d1-450a-9125-f48bee433d17\") " Nov 21 19:28:30 crc kubenswrapper[4701]: I1121 19:28:30.218339 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9961b99c-11d1-450a-9125-f48bee433d17-utilities\") pod \"9961b99c-11d1-450a-9125-f48bee433d17\" (UID: \"9961b99c-11d1-450a-9125-f48bee433d17\") " Nov 21 19:28:30 crc kubenswrapper[4701]: I1121 19:28:30.219702 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9961b99c-11d1-450a-9125-f48bee433d17-utilities" (OuterVolumeSpecName: "utilities") pod "9961b99c-11d1-450a-9125-f48bee433d17" (UID: "9961b99c-11d1-450a-9125-f48bee433d17"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:28:30 crc kubenswrapper[4701]: I1121 19:28:30.227372 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9961b99c-11d1-450a-9125-f48bee433d17-kube-api-access-nhldz" (OuterVolumeSpecName: "kube-api-access-nhldz") pod "9961b99c-11d1-450a-9125-f48bee433d17" (UID: "9961b99c-11d1-450a-9125-f48bee433d17"). InnerVolumeSpecName "kube-api-access-nhldz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:28:30 crc kubenswrapper[4701]: I1121 19:28:30.288104 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9961b99c-11d1-450a-9125-f48bee433d17-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9961b99c-11d1-450a-9125-f48bee433d17" (UID: "9961b99c-11d1-450a-9125-f48bee433d17"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:28:30 crc kubenswrapper[4701]: I1121 19:28:30.320746 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nhldz\" (UniqueName: \"kubernetes.io/projected/9961b99c-11d1-450a-9125-f48bee433d17-kube-api-access-nhldz\") on node \"crc\" DevicePath \"\"" Nov 21 19:28:30 crc kubenswrapper[4701]: I1121 19:28:30.320788 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9961b99c-11d1-450a-9125-f48bee433d17-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 19:28:30 crc kubenswrapper[4701]: I1121 19:28:30.320798 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9961b99c-11d1-450a-9125-f48bee433d17-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 19:28:30 crc kubenswrapper[4701]: I1121 19:28:30.518226 4701 generic.go:334] "Generic (PLEG): container finished" podID="9961b99c-11d1-450a-9125-f48bee433d17" containerID="365fd0a05c17c63c3561032e1dba2bab6f3476683e804668f55ddcda27fc38ae" exitCode=0 Nov 21 19:28:30 crc kubenswrapper[4701]: I1121 19:28:30.518316 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fc87q" event={"ID":"9961b99c-11d1-450a-9125-f48bee433d17","Type":"ContainerDied","Data":"365fd0a05c17c63c3561032e1dba2bab6f3476683e804668f55ddcda27fc38ae"} Nov 21 19:28:30 crc kubenswrapper[4701]: I1121 19:28:30.518394 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-fc87q" Nov 21 19:28:30 crc kubenswrapper[4701]: I1121 19:28:30.519817 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fc87q" event={"ID":"9961b99c-11d1-450a-9125-f48bee433d17","Type":"ContainerDied","Data":"1613c179a145510d02742c522801f7c5888a2edd71151c98b3f962dca4a3cec1"} Nov 21 19:28:30 crc kubenswrapper[4701]: I1121 19:28:30.519912 4701 scope.go:117] "RemoveContainer" containerID="365fd0a05c17c63c3561032e1dba2bab6f3476683e804668f55ddcda27fc38ae" Nov 21 19:28:30 crc kubenswrapper[4701]: I1121 19:28:30.564158 4701 scope.go:117] "RemoveContainer" containerID="5168a5d3c56e426cf9cddc505217c6fecee255d67a1436279f578e0b3ce920bf" Nov 21 19:28:30 crc kubenswrapper[4701]: I1121 19:28:30.602273 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fc87q"] Nov 21 19:28:30 crc kubenswrapper[4701]: I1121 19:28:30.624023 4701 scope.go:117] "RemoveContainer" containerID="34856b18cd2c11fbb2e272689ef6f90fd20b6a56533da1ff33e082b6a0cbed10" Nov 21 19:28:30 crc kubenswrapper[4701]: I1121 19:28:30.702015 4701 scope.go:117] "RemoveContainer" containerID="365fd0a05c17c63c3561032e1dba2bab6f3476683e804668f55ddcda27fc38ae" Nov 21 19:28:30 crc kubenswrapper[4701]: I1121 19:28:30.707111 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-fc87q"] Nov 21 19:28:30 crc kubenswrapper[4701]: E1121 19:28:30.709239 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"365fd0a05c17c63c3561032e1dba2bab6f3476683e804668f55ddcda27fc38ae\": container with ID starting with 365fd0a05c17c63c3561032e1dba2bab6f3476683e804668f55ddcda27fc38ae not found: ID does not exist" containerID="365fd0a05c17c63c3561032e1dba2bab6f3476683e804668f55ddcda27fc38ae" Nov 21 19:28:30 crc kubenswrapper[4701]: I1121 19:28:30.709338 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"365fd0a05c17c63c3561032e1dba2bab6f3476683e804668f55ddcda27fc38ae"} err="failed to get container status \"365fd0a05c17c63c3561032e1dba2bab6f3476683e804668f55ddcda27fc38ae\": rpc error: code = NotFound desc = could not find container \"365fd0a05c17c63c3561032e1dba2bab6f3476683e804668f55ddcda27fc38ae\": container with ID starting with 365fd0a05c17c63c3561032e1dba2bab6f3476683e804668f55ddcda27fc38ae not found: ID does not exist" Nov 21 19:28:30 crc kubenswrapper[4701]: I1121 19:28:30.709369 4701 scope.go:117] "RemoveContainer" containerID="5168a5d3c56e426cf9cddc505217c6fecee255d67a1436279f578e0b3ce920bf" Nov 21 19:28:30 crc kubenswrapper[4701]: E1121 19:28:30.727170 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5168a5d3c56e426cf9cddc505217c6fecee255d67a1436279f578e0b3ce920bf\": container with ID starting with 5168a5d3c56e426cf9cddc505217c6fecee255d67a1436279f578e0b3ce920bf not found: ID does not exist" containerID="5168a5d3c56e426cf9cddc505217c6fecee255d67a1436279f578e0b3ce920bf" Nov 21 19:28:30 crc kubenswrapper[4701]: I1121 19:28:30.727258 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5168a5d3c56e426cf9cddc505217c6fecee255d67a1436279f578e0b3ce920bf"} err="failed to get container status \"5168a5d3c56e426cf9cddc505217c6fecee255d67a1436279f578e0b3ce920bf\": rpc error: code = NotFound desc = could not find 
container \"5168a5d3c56e426cf9cddc505217c6fecee255d67a1436279f578e0b3ce920bf\": container with ID starting with 5168a5d3c56e426cf9cddc505217c6fecee255d67a1436279f578e0b3ce920bf not found: ID does not exist" Nov 21 19:28:30 crc kubenswrapper[4701]: I1121 19:28:30.731337 4701 scope.go:117] "RemoveContainer" containerID="34856b18cd2c11fbb2e272689ef6f90fd20b6a56533da1ff33e082b6a0cbed10" Nov 21 19:28:30 crc kubenswrapper[4701]: E1121 19:28:30.740217 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"34856b18cd2c11fbb2e272689ef6f90fd20b6a56533da1ff33e082b6a0cbed10\": container with ID starting with 34856b18cd2c11fbb2e272689ef6f90fd20b6a56533da1ff33e082b6a0cbed10 not found: ID does not exist" containerID="34856b18cd2c11fbb2e272689ef6f90fd20b6a56533da1ff33e082b6a0cbed10" Nov 21 19:28:30 crc kubenswrapper[4701]: I1121 19:28:30.740280 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"34856b18cd2c11fbb2e272689ef6f90fd20b6a56533da1ff33e082b6a0cbed10"} err="failed to get container status \"34856b18cd2c11fbb2e272689ef6f90fd20b6a56533da1ff33e082b6a0cbed10\": rpc error: code = NotFound desc = could not find container \"34856b18cd2c11fbb2e272689ef6f90fd20b6a56533da1ff33e082b6a0cbed10\": container with ID starting with 34856b18cd2c11fbb2e272689ef6f90fd20b6a56533da1ff33e082b6a0cbed10 not found: ID does not exist" Nov 21 19:28:31 crc kubenswrapper[4701]: I1121 19:28:31.973642 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9961b99c-11d1-450a-9125-f48bee433d17" path="/var/lib/kubelet/pods/9961b99c-11d1-450a-9125-f48bee433d17/volumes" Nov 21 19:28:37 crc kubenswrapper[4701]: I1121 19:28:37.952007 4701 scope.go:117] "RemoveContainer" containerID="081447c4e1559ffc58e33e136a78fa7b343ef9791f855a491b27a72a49d8cde6" Nov 21 19:28:37 crc kubenswrapper[4701]: E1121 19:28:37.953612 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:28:52 crc kubenswrapper[4701]: I1121 19:28:52.953722 4701 scope.go:117] "RemoveContainer" containerID="081447c4e1559ffc58e33e136a78fa7b343ef9791f855a491b27a72a49d8cde6" Nov 21 19:28:52 crc kubenswrapper[4701]: E1121 19:28:52.954778 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:28:56 crc kubenswrapper[4701]: I1121 19:28:56.879918 4701 generic.go:334] "Generic (PLEG): container finished" podID="d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2" containerID="bbbeb7a906e3da13725a85552576e896aefcf6d45a06ac0d5002cb30702ad503" exitCode=0 Nov 21 19:28:56 crc kubenswrapper[4701]: I1121 19:28:56.880019 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-cbvnd" 
event={"ID":"d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2","Type":"ContainerDied","Data":"bbbeb7a906e3da13725a85552576e896aefcf6d45a06ac0d5002cb30702ad503"} Nov 21 19:28:58 crc kubenswrapper[4701]: I1121 19:28:58.489138 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-cbvnd" Nov 21 19:28:58 crc kubenswrapper[4701]: I1121 19:28:58.632248 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2-ssh-key\") pod \"d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2\" (UID: \"d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2\") " Nov 21 19:28:58 crc kubenswrapper[4701]: I1121 19:28:58.632693 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2-bootstrap-combined-ca-bundle\") pod \"d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2\" (UID: \"d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2\") " Nov 21 19:28:58 crc kubenswrapper[4701]: I1121 19:28:58.632861 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2-inventory\") pod \"d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2\" (UID: \"d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2\") " Nov 21 19:28:58 crc kubenswrapper[4701]: I1121 19:28:58.632969 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5m29p\" (UniqueName: \"kubernetes.io/projected/d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2-kube-api-access-5m29p\") pod \"d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2\" (UID: \"d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2\") " Nov 21 19:28:58 crc kubenswrapper[4701]: I1121 19:28:58.641712 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2" (UID: "d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:28:58 crc kubenswrapper[4701]: I1121 19:28:58.645531 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2-kube-api-access-5m29p" (OuterVolumeSpecName: "kube-api-access-5m29p") pod "d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2" (UID: "d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2"). InnerVolumeSpecName "kube-api-access-5m29p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:28:58 crc kubenswrapper[4701]: I1121 19:28:58.665335 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2" (UID: "d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:28:58 crc kubenswrapper[4701]: I1121 19:28:58.677665 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2-inventory" (OuterVolumeSpecName: "inventory") pod "d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2" (UID: "d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:28:58 crc kubenswrapper[4701]: I1121 19:28:58.736467 4701 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 19:28:58 crc kubenswrapper[4701]: I1121 19:28:58.736519 4701 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:28:58 crc kubenswrapper[4701]: I1121 19:28:58.736544 4701 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 19:28:58 crc kubenswrapper[4701]: I1121 19:28:58.736563 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5m29p\" (UniqueName: \"kubernetes.io/projected/d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2-kube-api-access-5m29p\") on node \"crc\" DevicePath \"\"" Nov 21 19:28:58 crc kubenswrapper[4701]: I1121 19:28:58.908602 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-cbvnd" event={"ID":"d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2","Type":"ContainerDied","Data":"c5c7cb8cc8b1f139bc8f9bb1cc4db4c70eb771c14eb64dd330a7c3d27806a6a1"} Nov 21 19:28:58 crc kubenswrapper[4701]: I1121 19:28:58.908668 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c5c7cb8cc8b1f139bc8f9bb1cc4db4c70eb771c14eb64dd330a7c3d27806a6a1" Nov 21 19:28:58 crc kubenswrapper[4701]: I1121 19:28:58.908971 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-cbvnd" Nov 21 19:28:59 crc kubenswrapper[4701]: I1121 19:28:59.030641 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vj8rp"] Nov 21 19:28:59 crc kubenswrapper[4701]: E1121 19:28:59.031292 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9961b99c-11d1-450a-9125-f48bee433d17" containerName="extract-content" Nov 21 19:28:59 crc kubenswrapper[4701]: I1121 19:28:59.031321 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="9961b99c-11d1-450a-9125-f48bee433d17" containerName="extract-content" Nov 21 19:28:59 crc kubenswrapper[4701]: E1121 19:28:59.031345 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a8ccc76-9a49-4840-99ce-d87c21a3d0a2" containerName="registry-server" Nov 21 19:28:59 crc kubenswrapper[4701]: I1121 19:28:59.031355 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a8ccc76-9a49-4840-99ce-d87c21a3d0a2" containerName="registry-server" Nov 21 19:28:59 crc kubenswrapper[4701]: E1121 19:28:59.031378 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9961b99c-11d1-450a-9125-f48bee433d17" containerName="extract-utilities" Nov 21 19:28:59 crc kubenswrapper[4701]: I1121 19:28:59.031389 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="9961b99c-11d1-450a-9125-f48bee433d17" containerName="extract-utilities" Nov 21 19:28:59 crc kubenswrapper[4701]: E1121 19:28:59.031408 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9961b99c-11d1-450a-9125-f48bee433d17" containerName="registry-server" Nov 21 19:28:59 crc kubenswrapper[4701]: I1121 19:28:59.031417 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="9961b99c-11d1-450a-9125-f48bee433d17" containerName="registry-server" Nov 21 19:28:59 crc kubenswrapper[4701]: E1121 19:28:59.031439 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a8ccc76-9a49-4840-99ce-d87c21a3d0a2" containerName="extract-content" Nov 21 19:28:59 crc kubenswrapper[4701]: I1121 19:28:59.031449 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a8ccc76-9a49-4840-99ce-d87c21a3d0a2" containerName="extract-content" Nov 21 19:28:59 crc kubenswrapper[4701]: E1121 19:28:59.031466 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a8ccc76-9a49-4840-99ce-d87c21a3d0a2" containerName="extract-utilities" Nov 21 19:28:59 crc kubenswrapper[4701]: I1121 19:28:59.031474 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a8ccc76-9a49-4840-99ce-d87c21a3d0a2" containerName="extract-utilities" Nov 21 19:28:59 crc kubenswrapper[4701]: E1121 19:28:59.031499 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 21 19:28:59 crc kubenswrapper[4701]: I1121 19:28:59.031511 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 21 19:28:59 crc kubenswrapper[4701]: I1121 19:28:59.031784 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="9961b99c-11d1-450a-9125-f48bee433d17" containerName="registry-server" Nov 21 19:28:59 crc kubenswrapper[4701]: I1121 19:28:59.031829 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2" 
containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 21 19:28:59 crc kubenswrapper[4701]: I1121 19:28:59.031853 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a8ccc76-9a49-4840-99ce-d87c21a3d0a2" containerName="registry-server" Nov 21 19:28:59 crc kubenswrapper[4701]: I1121 19:28:59.033700 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vj8rp" Nov 21 19:28:59 crc kubenswrapper[4701]: I1121 19:28:59.039119 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hqsfp" Nov 21 19:28:59 crc kubenswrapper[4701]: I1121 19:28:59.039261 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 19:28:59 crc kubenswrapper[4701]: I1121 19:28:59.039129 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 19:28:59 crc kubenswrapper[4701]: I1121 19:28:59.040271 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 19:28:59 crc kubenswrapper[4701]: I1121 19:28:59.059008 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vj8rp"] Nov 21 19:28:59 crc kubenswrapper[4701]: I1121 19:28:59.146511 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/775c15c9-3c73-4e78-ad8e-b02163afc9f2-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vj8rp\" (UID: \"775c15c9-3c73-4e78-ad8e-b02163afc9f2\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vj8rp" Nov 21 19:28:59 crc kubenswrapper[4701]: I1121 19:28:59.146638 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x2br4\" (UniqueName: \"kubernetes.io/projected/775c15c9-3c73-4e78-ad8e-b02163afc9f2-kube-api-access-x2br4\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vj8rp\" (UID: \"775c15c9-3c73-4e78-ad8e-b02163afc9f2\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vj8rp" Nov 21 19:28:59 crc kubenswrapper[4701]: I1121 19:28:59.146678 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/775c15c9-3c73-4e78-ad8e-b02163afc9f2-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vj8rp\" (UID: \"775c15c9-3c73-4e78-ad8e-b02163afc9f2\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vj8rp" Nov 21 19:28:59 crc kubenswrapper[4701]: I1121 19:28:59.250178 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x2br4\" (UniqueName: \"kubernetes.io/projected/775c15c9-3c73-4e78-ad8e-b02163afc9f2-kube-api-access-x2br4\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vj8rp\" (UID: \"775c15c9-3c73-4e78-ad8e-b02163afc9f2\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vj8rp" Nov 21 19:28:59 crc kubenswrapper[4701]: I1121 19:28:59.250429 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/775c15c9-3c73-4e78-ad8e-b02163afc9f2-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vj8rp\" (UID: 
\"775c15c9-3c73-4e78-ad8e-b02163afc9f2\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vj8rp" Nov 21 19:28:59 crc kubenswrapper[4701]: I1121 19:28:59.251093 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/775c15c9-3c73-4e78-ad8e-b02163afc9f2-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vj8rp\" (UID: \"775c15c9-3c73-4e78-ad8e-b02163afc9f2\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vj8rp" Nov 21 19:28:59 crc kubenswrapper[4701]: I1121 19:28:59.256285 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/775c15c9-3c73-4e78-ad8e-b02163afc9f2-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vj8rp\" (UID: \"775c15c9-3c73-4e78-ad8e-b02163afc9f2\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vj8rp" Nov 21 19:28:59 crc kubenswrapper[4701]: I1121 19:28:59.258277 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/775c15c9-3c73-4e78-ad8e-b02163afc9f2-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vj8rp\" (UID: \"775c15c9-3c73-4e78-ad8e-b02163afc9f2\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vj8rp" Nov 21 19:28:59 crc kubenswrapper[4701]: I1121 19:28:59.278854 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x2br4\" (UniqueName: \"kubernetes.io/projected/775c15c9-3c73-4e78-ad8e-b02163afc9f2-kube-api-access-x2br4\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vj8rp\" (UID: \"775c15c9-3c73-4e78-ad8e-b02163afc9f2\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vj8rp" Nov 21 19:28:59 crc kubenswrapper[4701]: I1121 19:28:59.353041 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vj8rp" Nov 21 19:29:00 crc kubenswrapper[4701]: I1121 19:29:00.049433 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vj8rp"] Nov 21 19:29:00 crc kubenswrapper[4701]: I1121 19:29:00.938126 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vj8rp" event={"ID":"775c15c9-3c73-4e78-ad8e-b02163afc9f2","Type":"ContainerStarted","Data":"daaf972d404dc6c88eafcd89e1723cfaf85ac1cb6b06915080bbfb2439c9a2de"} Nov 21 19:29:00 crc kubenswrapper[4701]: I1121 19:29:00.938557 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vj8rp" event={"ID":"775c15c9-3c73-4e78-ad8e-b02163afc9f2","Type":"ContainerStarted","Data":"9dec2921c7f6e9714d96557f544339ed7e4247a671cb6c6d6d7a4d585ee8baba"} Nov 21 19:29:00 crc kubenswrapper[4701]: I1121 19:29:00.962983 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vj8rp" podStartSLOduration=1.444632554 podStartE2EDuration="1.962962278s" podCreationTimestamp="2025-11-21 19:28:59 +0000 UTC" firstStartedPulling="2025-11-21 19:29:00.059728379 +0000 UTC m=+1630.844868416" lastFinishedPulling="2025-11-21 19:29:00.578058103 +0000 UTC m=+1631.363198140" observedRunningTime="2025-11-21 19:29:00.959170475 +0000 UTC m=+1631.744310512" watchObservedRunningTime="2025-11-21 19:29:00.962962278 +0000 UTC m=+1631.748102305" Nov 21 19:29:05 crc kubenswrapper[4701]: I1121 19:29:05.959964 4701 scope.go:117] "RemoveContainer" containerID="081447c4e1559ffc58e33e136a78fa7b343ef9791f855a491b27a72a49d8cde6" Nov 21 19:29:05 crc kubenswrapper[4701]: E1121 19:29:05.971591 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:29:07 crc kubenswrapper[4701]: I1121 19:29:07.632769 4701 scope.go:117] "RemoveContainer" containerID="ee5d884df78d68a09f41cea83ca3d19a11d33ccf31d3a483e0660757388d02a5" Nov 21 19:29:07 crc kubenswrapper[4701]: I1121 19:29:07.683675 4701 scope.go:117] "RemoveContainer" containerID="a85c35eaf0a095de96a20a175d4c0d830fcdc297a080ea405fd21e32c603c3a7" Nov 21 19:29:07 crc kubenswrapper[4701]: I1121 19:29:07.773716 4701 scope.go:117] "RemoveContainer" containerID="c0841604da0076c938df5bf002791e90742140ae154f52d377179a475738593c" Nov 21 19:29:07 crc kubenswrapper[4701]: I1121 19:29:07.811123 4701 scope.go:117] "RemoveContainer" containerID="144ab1c00e70af0f3ce24cfd4ca81d940aeab24cd8147543c02a5afc7cc22f70" Nov 21 19:29:07 crc kubenswrapper[4701]: I1121 19:29:07.893496 4701 scope.go:117] "RemoveContainer" containerID="cd64dd68f08fcd5336538fe9a99b3a0f95bacca4cddc3137671d5d9a9d61a2aa" Nov 21 19:29:07 crc kubenswrapper[4701]: I1121 19:29:07.964478 4701 scope.go:117] "RemoveContainer" containerID="0e9b77e4afcfdfcec70788530b66cce6fe61e13c29b7b8dfec8da335ca28e5fa" Nov 21 19:29:09 crc kubenswrapper[4701]: I1121 19:29:09.085114 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-66xr9"] Nov 21 19:29:09 
crc kubenswrapper[4701]: I1121 19:29:09.101334 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-3b77-account-create-46vm2"] Nov 21 19:29:09 crc kubenswrapper[4701]: I1121 19:29:09.110022 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-958c-account-create-dzqch"] Nov 21 19:29:09 crc kubenswrapper[4701]: I1121 19:29:09.117882 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-3b77-account-create-46vm2"] Nov 21 19:29:09 crc kubenswrapper[4701]: I1121 19:29:09.124988 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-r44r6"] Nov 21 19:29:09 crc kubenswrapper[4701]: I1121 19:29:09.133644 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-66xr9"] Nov 21 19:29:09 crc kubenswrapper[4701]: I1121 19:29:09.155150 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-958c-account-create-dzqch"] Nov 21 19:29:09 crc kubenswrapper[4701]: I1121 19:29:09.175291 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-r44r6"] Nov 21 19:29:09 crc kubenswrapper[4701]: I1121 19:29:09.972157 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0ca2f6e9-d0f5-44ce-8562-74480c80d847" path="/var/lib/kubelet/pods/0ca2f6e9-d0f5-44ce-8562-74480c80d847/volumes" Nov 21 19:29:09 crc kubenswrapper[4701]: I1121 19:29:09.974018 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="556faa3b-2540-4344-b293-b68e892e4459" path="/var/lib/kubelet/pods/556faa3b-2540-4344-b293-b68e892e4459/volumes" Nov 21 19:29:09 crc kubenswrapper[4701]: I1121 19:29:09.975184 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="751ba251-758b-4c84-afb8-90205e9cb779" path="/var/lib/kubelet/pods/751ba251-758b-4c84-afb8-90205e9cb779/volumes" Nov 21 19:29:09 crc kubenswrapper[4701]: I1121 19:29:09.976678 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c5ce0222-5363-4b1d-acd2-f8b5a319ad24" path="/var/lib/kubelet/pods/c5ce0222-5363-4b1d-acd2-f8b5a319ad24/volumes" Nov 21 19:29:15 crc kubenswrapper[4701]: I1121 19:29:15.065694 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-7b44j"] Nov 21 19:29:15 crc kubenswrapper[4701]: I1121 19:29:15.079557 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-3e07-account-create-xh8l9"] Nov 21 19:29:15 crc kubenswrapper[4701]: I1121 19:29:15.093617 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-vkw87"] Nov 21 19:29:15 crc kubenswrapper[4701]: I1121 19:29:15.102871 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-7b44j"] Nov 21 19:29:15 crc kubenswrapper[4701]: I1121 19:29:15.110873 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-vkw87"] Nov 21 19:29:15 crc kubenswrapper[4701]: I1121 19:29:15.118791 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-3e07-account-create-xh8l9"] Nov 21 19:29:15 crc kubenswrapper[4701]: I1121 19:29:15.971167 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="27fa1270-e5a1-467f-90e0-88c263d82c29" path="/var/lib/kubelet/pods/27fa1270-e5a1-467f-90e0-88c263d82c29/volumes" Nov 21 19:29:15 crc kubenswrapper[4701]: I1121 19:29:15.972530 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f9acdb0-f91b-4cb5-bf36-34b5fcaa7c85" 
path="/var/lib/kubelet/pods/6f9acdb0-f91b-4cb5-bf36-34b5fcaa7c85/volumes" Nov 21 19:29:15 crc kubenswrapper[4701]: I1121 19:29:15.973762 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b122f197-4d99-467c-b60c-c2b7912244ba" path="/var/lib/kubelet/pods/b122f197-4d99-467c-b60c-c2b7912244ba/volumes" Nov 21 19:29:16 crc kubenswrapper[4701]: I1121 19:29:16.045345 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-plj99"] Nov 21 19:29:16 crc kubenswrapper[4701]: I1121 19:29:16.064586 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-plj99"] Nov 21 19:29:16 crc kubenswrapper[4701]: I1121 19:29:16.078330 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-20ac-account-create-h7hgn"] Nov 21 19:29:16 crc kubenswrapper[4701]: I1121 19:29:16.085022 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-20ac-account-create-h7hgn"] Nov 21 19:29:16 crc kubenswrapper[4701]: I1121 19:29:16.952123 4701 scope.go:117] "RemoveContainer" containerID="081447c4e1559ffc58e33e136a78fa7b343ef9791f855a491b27a72a49d8cde6" Nov 21 19:29:16 crc kubenswrapper[4701]: E1121 19:29:16.953230 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:29:17 crc kubenswrapper[4701]: I1121 19:29:17.988777 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6adc43bb-8f76-4e28-9afb-7999845bd2ac" path="/var/lib/kubelet/pods/6adc43bb-8f76-4e28-9afb-7999845bd2ac/volumes" Nov 21 19:29:18 crc kubenswrapper[4701]: I1121 19:29:17.999971 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cbdec32f-0ec7-4585-a554-aebca2106122" path="/var/lib/kubelet/pods/cbdec32f-0ec7-4585-a554-aebca2106122/volumes" Nov 21 19:29:25 crc kubenswrapper[4701]: I1121 19:29:25.053436 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-db-sync-xgwkm"] Nov 21 19:29:25 crc kubenswrapper[4701]: I1121 19:29:25.071150 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-db-sync-xgwkm"] Nov 21 19:29:25 crc kubenswrapper[4701]: I1121 19:29:25.967522 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="98d3b473-8ffd-47bb-a010-65c275226084" path="/var/lib/kubelet/pods/98d3b473-8ffd-47bb-a010-65c275226084/volumes" Nov 21 19:29:30 crc kubenswrapper[4701]: I1121 19:29:30.951044 4701 scope.go:117] "RemoveContainer" containerID="081447c4e1559ffc58e33e136a78fa7b343ef9791f855a491b27a72a49d8cde6" Nov 21 19:29:30 crc kubenswrapper[4701]: E1121 19:29:30.952521 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:29:44 crc kubenswrapper[4701]: I1121 19:29:44.951683 4701 scope.go:117] "RemoveContainer" 
containerID="081447c4e1559ffc58e33e136a78fa7b343ef9791f855a491b27a72a49d8cde6" Nov 21 19:29:44 crc kubenswrapper[4701]: E1121 19:29:44.953397 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:29:56 crc kubenswrapper[4701]: I1121 19:29:56.076428 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-69n94"] Nov 21 19:29:56 crc kubenswrapper[4701]: I1121 19:29:56.088867 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-69n94"] Nov 21 19:29:56 crc kubenswrapper[4701]: I1121 19:29:56.952030 4701 scope.go:117] "RemoveContainer" containerID="081447c4e1559ffc58e33e136a78fa7b343ef9791f855a491b27a72a49d8cde6" Nov 21 19:29:56 crc kubenswrapper[4701]: E1121 19:29:56.952824 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:29:57 crc kubenswrapper[4701]: I1121 19:29:57.968872 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1" path="/var/lib/kubelet/pods/1a8ca8ef-555a-4ef6-a09e-1ff0e9b841f1/volumes" Nov 21 19:30:00 crc kubenswrapper[4701]: I1121 19:30:00.189596 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395890-j7xwt"] Nov 21 19:30:00 crc kubenswrapper[4701]: I1121 19:30:00.192991 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395890-j7xwt" Nov 21 19:30:00 crc kubenswrapper[4701]: I1121 19:30:00.196177 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 21 19:30:00 crc kubenswrapper[4701]: I1121 19:30:00.196553 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 21 19:30:00 crc kubenswrapper[4701]: I1121 19:30:00.204794 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395890-j7xwt"] Nov 21 19:30:00 crc kubenswrapper[4701]: I1121 19:30:00.342714 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tmvsl\" (UniqueName: \"kubernetes.io/projected/379b0a05-e937-47ce-90c4-5fea3738796b-kube-api-access-tmvsl\") pod \"collect-profiles-29395890-j7xwt\" (UID: \"379b0a05-e937-47ce-90c4-5fea3738796b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395890-j7xwt" Nov 21 19:30:00 crc kubenswrapper[4701]: I1121 19:30:00.342892 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/379b0a05-e937-47ce-90c4-5fea3738796b-secret-volume\") pod \"collect-profiles-29395890-j7xwt\" (UID: \"379b0a05-e937-47ce-90c4-5fea3738796b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395890-j7xwt" Nov 21 19:30:00 crc kubenswrapper[4701]: I1121 19:30:00.342958 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/379b0a05-e937-47ce-90c4-5fea3738796b-config-volume\") pod \"collect-profiles-29395890-j7xwt\" (UID: \"379b0a05-e937-47ce-90c4-5fea3738796b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395890-j7xwt" Nov 21 19:30:00 crc kubenswrapper[4701]: I1121 19:30:00.446040 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tmvsl\" (UniqueName: \"kubernetes.io/projected/379b0a05-e937-47ce-90c4-5fea3738796b-kube-api-access-tmvsl\") pod \"collect-profiles-29395890-j7xwt\" (UID: \"379b0a05-e937-47ce-90c4-5fea3738796b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395890-j7xwt" Nov 21 19:30:00 crc kubenswrapper[4701]: I1121 19:30:00.446313 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/379b0a05-e937-47ce-90c4-5fea3738796b-secret-volume\") pod \"collect-profiles-29395890-j7xwt\" (UID: \"379b0a05-e937-47ce-90c4-5fea3738796b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395890-j7xwt" Nov 21 19:30:00 crc kubenswrapper[4701]: I1121 19:30:00.446497 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/379b0a05-e937-47ce-90c4-5fea3738796b-config-volume\") pod \"collect-profiles-29395890-j7xwt\" (UID: \"379b0a05-e937-47ce-90c4-5fea3738796b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395890-j7xwt" Nov 21 19:30:00 crc kubenswrapper[4701]: I1121 19:30:00.447459 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/379b0a05-e937-47ce-90c4-5fea3738796b-config-volume\") pod 
\"collect-profiles-29395890-j7xwt\" (UID: \"379b0a05-e937-47ce-90c4-5fea3738796b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395890-j7xwt" Nov 21 19:30:00 crc kubenswrapper[4701]: I1121 19:30:00.454082 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/379b0a05-e937-47ce-90c4-5fea3738796b-secret-volume\") pod \"collect-profiles-29395890-j7xwt\" (UID: \"379b0a05-e937-47ce-90c4-5fea3738796b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395890-j7xwt" Nov 21 19:30:00 crc kubenswrapper[4701]: I1121 19:30:00.471478 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tmvsl\" (UniqueName: \"kubernetes.io/projected/379b0a05-e937-47ce-90c4-5fea3738796b-kube-api-access-tmvsl\") pod \"collect-profiles-29395890-j7xwt\" (UID: \"379b0a05-e937-47ce-90c4-5fea3738796b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395890-j7xwt" Nov 21 19:30:00 crc kubenswrapper[4701]: I1121 19:30:00.528844 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395890-j7xwt" Nov 21 19:30:01 crc kubenswrapper[4701]: I1121 19:30:01.085165 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395890-j7xwt"] Nov 21 19:30:01 crc kubenswrapper[4701]: I1121 19:30:01.792099 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395890-j7xwt" event={"ID":"379b0a05-e937-47ce-90c4-5fea3738796b","Type":"ContainerStarted","Data":"401821c614254f25c4ebb5efe6d53dc187b9a615601207ba2bf952d6145067be"} Nov 21 19:30:01 crc kubenswrapper[4701]: I1121 19:30:01.792509 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395890-j7xwt" event={"ID":"379b0a05-e937-47ce-90c4-5fea3738796b","Type":"ContainerStarted","Data":"e4ed9cd08ca4f5eaa3f6b3651ad7987a69e8009d8b9050a64adca84d5a682438"} Nov 21 19:30:02 crc kubenswrapper[4701]: I1121 19:30:02.807726 4701 generic.go:334] "Generic (PLEG): container finished" podID="379b0a05-e937-47ce-90c4-5fea3738796b" containerID="401821c614254f25c4ebb5efe6d53dc187b9a615601207ba2bf952d6145067be" exitCode=0 Nov 21 19:30:02 crc kubenswrapper[4701]: I1121 19:30:02.807809 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395890-j7xwt" event={"ID":"379b0a05-e937-47ce-90c4-5fea3738796b","Type":"ContainerDied","Data":"401821c614254f25c4ebb5efe6d53dc187b9a615601207ba2bf952d6145067be"} Nov 21 19:30:03 crc kubenswrapper[4701]: I1121 19:30:03.264627 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395890-j7xwt" Nov 21 19:30:03 crc kubenswrapper[4701]: I1121 19:30:03.319325 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/379b0a05-e937-47ce-90c4-5fea3738796b-secret-volume\") pod \"379b0a05-e937-47ce-90c4-5fea3738796b\" (UID: \"379b0a05-e937-47ce-90c4-5fea3738796b\") " Nov 21 19:30:03 crc kubenswrapper[4701]: I1121 19:30:03.319876 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/379b0a05-e937-47ce-90c4-5fea3738796b-config-volume\") pod \"379b0a05-e937-47ce-90c4-5fea3738796b\" (UID: \"379b0a05-e937-47ce-90c4-5fea3738796b\") " Nov 21 19:30:03 crc kubenswrapper[4701]: I1121 19:30:03.319964 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tmvsl\" (UniqueName: \"kubernetes.io/projected/379b0a05-e937-47ce-90c4-5fea3738796b-kube-api-access-tmvsl\") pod \"379b0a05-e937-47ce-90c4-5fea3738796b\" (UID: \"379b0a05-e937-47ce-90c4-5fea3738796b\") " Nov 21 19:30:03 crc kubenswrapper[4701]: I1121 19:30:03.322976 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/379b0a05-e937-47ce-90c4-5fea3738796b-config-volume" (OuterVolumeSpecName: "config-volume") pod "379b0a05-e937-47ce-90c4-5fea3738796b" (UID: "379b0a05-e937-47ce-90c4-5fea3738796b"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:30:03 crc kubenswrapper[4701]: I1121 19:30:03.328321 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/379b0a05-e937-47ce-90c4-5fea3738796b-kube-api-access-tmvsl" (OuterVolumeSpecName: "kube-api-access-tmvsl") pod "379b0a05-e937-47ce-90c4-5fea3738796b" (UID: "379b0a05-e937-47ce-90c4-5fea3738796b"). InnerVolumeSpecName "kube-api-access-tmvsl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:30:03 crc kubenswrapper[4701]: I1121 19:30:03.328464 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/379b0a05-e937-47ce-90c4-5fea3738796b-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "379b0a05-e937-47ce-90c4-5fea3738796b" (UID: "379b0a05-e937-47ce-90c4-5fea3738796b"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:30:03 crc kubenswrapper[4701]: I1121 19:30:03.423166 4701 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/379b0a05-e937-47ce-90c4-5fea3738796b-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 21 19:30:03 crc kubenswrapper[4701]: I1121 19:30:03.423226 4701 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/379b0a05-e937-47ce-90c4-5fea3738796b-config-volume\") on node \"crc\" DevicePath \"\"" Nov 21 19:30:03 crc kubenswrapper[4701]: I1121 19:30:03.423241 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tmvsl\" (UniqueName: \"kubernetes.io/projected/379b0a05-e937-47ce-90c4-5fea3738796b-kube-api-access-tmvsl\") on node \"crc\" DevicePath \"\"" Nov 21 19:30:03 crc kubenswrapper[4701]: I1121 19:30:03.829113 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395890-j7xwt" event={"ID":"379b0a05-e937-47ce-90c4-5fea3738796b","Type":"ContainerDied","Data":"e4ed9cd08ca4f5eaa3f6b3651ad7987a69e8009d8b9050a64adca84d5a682438"} Nov 21 19:30:03 crc kubenswrapper[4701]: I1121 19:30:03.829175 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395890-j7xwt" Nov 21 19:30:03 crc kubenswrapper[4701]: I1121 19:30:03.829182 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e4ed9cd08ca4f5eaa3f6b3651ad7987a69e8009d8b9050a64adca84d5a682438" Nov 21 19:30:08 crc kubenswrapper[4701]: I1121 19:30:08.175452 4701 scope.go:117] "RemoveContainer" containerID="ccad98e4a82721a16bf14c894df82fe488525177df96540641c8eb6560eee3b9" Nov 21 19:30:08 crc kubenswrapper[4701]: I1121 19:30:08.235936 4701 scope.go:117] "RemoveContainer" containerID="e415adef03894db4bda0f20c61d5f4e6950090bcfd161f18473e10e6f144a19a" Nov 21 19:30:08 crc kubenswrapper[4701]: I1121 19:30:08.276376 4701 scope.go:117] "RemoveContainer" containerID="01f74fd3eb413d0e6f8c8eb0e3fb985777af444441e2c4844577070869f4c46e" Nov 21 19:30:08 crc kubenswrapper[4701]: I1121 19:30:08.316144 4701 scope.go:117] "RemoveContainer" containerID="52af84ab1204a8fa9b18e6eae42ba1f75dfc50729fcb446205319c006baf36a5" Nov 21 19:30:08 crc kubenswrapper[4701]: I1121 19:30:08.367344 4701 scope.go:117] "RemoveContainer" containerID="86fa1db4927122530f5aa1a3ab987e220b706eddd564ec4982f87982eb7f40dc" Nov 21 19:30:08 crc kubenswrapper[4701]: I1121 19:30:08.429479 4701 scope.go:117] "RemoveContainer" containerID="bc0b03336d4f24deaad77d15611a906a0de04779c3825a44e8772d79d4d392c1" Nov 21 19:30:08 crc kubenswrapper[4701]: I1121 19:30:08.471680 4701 scope.go:117] "RemoveContainer" containerID="a80d181868a6b8dd76586059b72078d0c221388e6852ede733d0655453e09a6f" Nov 21 19:30:08 crc kubenswrapper[4701]: I1121 19:30:08.502849 4701 scope.go:117] "RemoveContainer" containerID="b34ed9dcc9a9eeea7f6ea5c9cbd5e400bbc53d6155478d416cb5f6588395fa39" Nov 21 19:30:08 crc kubenswrapper[4701]: I1121 19:30:08.544383 4701 scope.go:117] "RemoveContainer" containerID="af895a35ec1e40b2001b055d1f06610607d627b64fff99653689a60994a3d7a8" Nov 21 19:30:08 crc kubenswrapper[4701]: I1121 19:30:08.577690 4701 scope.go:117] "RemoveContainer" containerID="f4135d9732575126a9da37439373f96b909ae9dc891b76dcc394044243736a52" Nov 21 19:30:08 crc kubenswrapper[4701]: I1121 19:30:08.606607 4701 scope.go:117] "RemoveContainer" 
containerID="f54eebddfbea40c49d40447b53ea6b5e0e72b3205a9fc9dd810f3660ce854ec7" Nov 21 19:30:08 crc kubenswrapper[4701]: I1121 19:30:08.952005 4701 scope.go:117] "RemoveContainer" containerID="081447c4e1559ffc58e33e136a78fa7b343ef9791f855a491b27a72a49d8cde6" Nov 21 19:30:08 crc kubenswrapper[4701]: E1121 19:30:08.952418 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:30:10 crc kubenswrapper[4701]: I1121 19:30:10.084928 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-c699r"] Nov 21 19:30:10 crc kubenswrapper[4701]: I1121 19:30:10.099557 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-c699r"] Nov 21 19:30:11 crc kubenswrapper[4701]: I1121 19:30:11.053303 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-kfsds"] Nov 21 19:30:11 crc kubenswrapper[4701]: I1121 19:30:11.082924 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-kfsds"] Nov 21 19:30:11 crc kubenswrapper[4701]: I1121 19:30:11.971786 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="12e527f7-2e4d-421a-8639-e282a383774a" path="/var/lib/kubelet/pods/12e527f7-2e4d-421a-8639-e282a383774a/volumes" Nov 21 19:30:11 crc kubenswrapper[4701]: I1121 19:30:11.972539 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="91518a76-c4e2-4f08-831a-aa8fb9d4778c" path="/var/lib/kubelet/pods/91518a76-c4e2-4f08-831a-aa8fb9d4778c/volumes" Nov 21 19:30:12 crc kubenswrapper[4701]: I1121 19:30:12.041451 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-rp9lc"] Nov 21 19:30:12 crc kubenswrapper[4701]: I1121 19:30:12.059863 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-rp9lc"] Nov 21 19:30:13 crc kubenswrapper[4701]: I1121 19:30:13.976405 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a93232c-afb8-4ff5-8775-5c3574997149" path="/var/lib/kubelet/pods/0a93232c-afb8-4ff5-8775-5c3574997149/volumes" Nov 21 19:30:19 crc kubenswrapper[4701]: I1121 19:30:19.958491 4701 scope.go:117] "RemoveContainer" containerID="081447c4e1559ffc58e33e136a78fa7b343ef9791f855a491b27a72a49d8cde6" Nov 21 19:30:19 crc kubenswrapper[4701]: E1121 19:30:19.959226 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:30:23 crc kubenswrapper[4701]: I1121 19:30:23.072929 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-69chf"] Nov 21 19:30:23 crc kubenswrapper[4701]: I1121 19:30:23.100852 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-69chf"] Nov 21 19:30:23 crc kubenswrapper[4701]: I1121 19:30:23.965706 4701 kubelet_volumes.go:163] "Cleaned up orphaned 
pod volumes dir" podUID="d5b93dd5-e6da-4f02-ac4d-b89773e967d3" path="/var/lib/kubelet/pods/d5b93dd5-e6da-4f02-ac4d-b89773e967d3/volumes" Nov 21 19:30:26 crc kubenswrapper[4701]: I1121 19:30:26.051374 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-sklns"] Nov 21 19:30:26 crc kubenswrapper[4701]: I1121 19:30:26.067618 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-sklns"] Nov 21 19:30:27 crc kubenswrapper[4701]: I1121 19:30:27.968033 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87272c2c-3166-4a6a-aff9-41278b0b1b51" path="/var/lib/kubelet/pods/87272c2c-3166-4a6a-aff9-41278b0b1b51/volumes" Nov 21 19:30:31 crc kubenswrapper[4701]: I1121 19:30:31.954071 4701 scope.go:117] "RemoveContainer" containerID="081447c4e1559ffc58e33e136a78fa7b343ef9791f855a491b27a72a49d8cde6" Nov 21 19:30:31 crc kubenswrapper[4701]: E1121 19:30:31.955492 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:30:45 crc kubenswrapper[4701]: I1121 19:30:45.953232 4701 scope.go:117] "RemoveContainer" containerID="081447c4e1559ffc58e33e136a78fa7b343ef9791f855a491b27a72a49d8cde6" Nov 21 19:30:45 crc kubenswrapper[4701]: E1121 19:30:45.954539 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:30:50 crc kubenswrapper[4701]: I1121 19:30:50.525534 4701 generic.go:334] "Generic (PLEG): container finished" podID="775c15c9-3c73-4e78-ad8e-b02163afc9f2" containerID="daaf972d404dc6c88eafcd89e1723cfaf85ac1cb6b06915080bbfb2439c9a2de" exitCode=0 Nov 21 19:30:50 crc kubenswrapper[4701]: I1121 19:30:50.525661 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vj8rp" event={"ID":"775c15c9-3c73-4e78-ad8e-b02163afc9f2","Type":"ContainerDied","Data":"daaf972d404dc6c88eafcd89e1723cfaf85ac1cb6b06915080bbfb2439c9a2de"} Nov 21 19:30:52 crc kubenswrapper[4701]: I1121 19:30:52.132065 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vj8rp" Nov 21 19:30:52 crc kubenswrapper[4701]: I1121 19:30:52.238036 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/775c15c9-3c73-4e78-ad8e-b02163afc9f2-ssh-key\") pod \"775c15c9-3c73-4e78-ad8e-b02163afc9f2\" (UID: \"775c15c9-3c73-4e78-ad8e-b02163afc9f2\") " Nov 21 19:30:52 crc kubenswrapper[4701]: I1121 19:30:52.238304 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2br4\" (UniqueName: \"kubernetes.io/projected/775c15c9-3c73-4e78-ad8e-b02163afc9f2-kube-api-access-x2br4\") pod \"775c15c9-3c73-4e78-ad8e-b02163afc9f2\" (UID: \"775c15c9-3c73-4e78-ad8e-b02163afc9f2\") " Nov 21 19:30:52 crc kubenswrapper[4701]: I1121 19:30:52.238361 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/775c15c9-3c73-4e78-ad8e-b02163afc9f2-inventory\") pod \"775c15c9-3c73-4e78-ad8e-b02163afc9f2\" (UID: \"775c15c9-3c73-4e78-ad8e-b02163afc9f2\") " Nov 21 19:30:52 crc kubenswrapper[4701]: I1121 19:30:52.246294 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/775c15c9-3c73-4e78-ad8e-b02163afc9f2-kube-api-access-x2br4" (OuterVolumeSpecName: "kube-api-access-x2br4") pod "775c15c9-3c73-4e78-ad8e-b02163afc9f2" (UID: "775c15c9-3c73-4e78-ad8e-b02163afc9f2"). InnerVolumeSpecName "kube-api-access-x2br4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:30:52 crc kubenswrapper[4701]: I1121 19:30:52.296518 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/775c15c9-3c73-4e78-ad8e-b02163afc9f2-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "775c15c9-3c73-4e78-ad8e-b02163afc9f2" (UID: "775c15c9-3c73-4e78-ad8e-b02163afc9f2"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:30:52 crc kubenswrapper[4701]: I1121 19:30:52.297267 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/775c15c9-3c73-4e78-ad8e-b02163afc9f2-inventory" (OuterVolumeSpecName: "inventory") pod "775c15c9-3c73-4e78-ad8e-b02163afc9f2" (UID: "775c15c9-3c73-4e78-ad8e-b02163afc9f2"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:30:52 crc kubenswrapper[4701]: I1121 19:30:52.341283 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2br4\" (UniqueName: \"kubernetes.io/projected/775c15c9-3c73-4e78-ad8e-b02163afc9f2-kube-api-access-x2br4\") on node \"crc\" DevicePath \"\"" Nov 21 19:30:52 crc kubenswrapper[4701]: I1121 19:30:52.341325 4701 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/775c15c9-3c73-4e78-ad8e-b02163afc9f2-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 19:30:52 crc kubenswrapper[4701]: I1121 19:30:52.341339 4701 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/775c15c9-3c73-4e78-ad8e-b02163afc9f2-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 19:30:52 crc kubenswrapper[4701]: I1121 19:30:52.560944 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vj8rp" event={"ID":"775c15c9-3c73-4e78-ad8e-b02163afc9f2","Type":"ContainerDied","Data":"9dec2921c7f6e9714d96557f544339ed7e4247a671cb6c6d6d7a4d585ee8baba"} Nov 21 19:30:52 crc kubenswrapper[4701]: I1121 19:30:52.561019 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9dec2921c7f6e9714d96557f544339ed7e4247a671cb6c6d6d7a4d585ee8baba" Nov 21 19:30:52 crc kubenswrapper[4701]: I1121 19:30:52.561040 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vj8rp" Nov 21 19:30:52 crc kubenswrapper[4701]: I1121 19:30:52.713622 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t25h9"] Nov 21 19:30:52 crc kubenswrapper[4701]: E1121 19:30:52.714494 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="775c15c9-3c73-4e78-ad8e-b02163afc9f2" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 21 19:30:52 crc kubenswrapper[4701]: I1121 19:30:52.714541 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="775c15c9-3c73-4e78-ad8e-b02163afc9f2" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 21 19:30:52 crc kubenswrapper[4701]: E1121 19:30:52.714630 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="379b0a05-e937-47ce-90c4-5fea3738796b" containerName="collect-profiles" Nov 21 19:30:52 crc kubenswrapper[4701]: I1121 19:30:52.714652 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="379b0a05-e937-47ce-90c4-5fea3738796b" containerName="collect-profiles" Nov 21 19:30:52 crc kubenswrapper[4701]: I1121 19:30:52.715144 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="775c15c9-3c73-4e78-ad8e-b02163afc9f2" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 21 19:30:52 crc kubenswrapper[4701]: I1121 19:30:52.715590 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="379b0a05-e937-47ce-90c4-5fea3738796b" containerName="collect-profiles" Nov 21 19:30:52 crc kubenswrapper[4701]: I1121 19:30:52.717019 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t25h9" Nov 21 19:30:52 crc kubenswrapper[4701]: I1121 19:30:52.723545 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 19:30:52 crc kubenswrapper[4701]: I1121 19:30:52.723860 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 19:30:52 crc kubenswrapper[4701]: I1121 19:30:52.724757 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 19:30:52 crc kubenswrapper[4701]: I1121 19:30:52.725861 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hqsfp" Nov 21 19:30:52 crc kubenswrapper[4701]: I1121 19:30:52.736664 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t25h9"] Nov 21 19:30:52 crc kubenswrapper[4701]: I1121 19:30:52.855884 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ck2pc\" (UniqueName: \"kubernetes.io/projected/ae68b914-c2d3-4df9-bd3c-563524bb9ded-kube-api-access-ck2pc\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-t25h9\" (UID: \"ae68b914-c2d3-4df9-bd3c-563524bb9ded\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t25h9" Nov 21 19:30:52 crc kubenswrapper[4701]: I1121 19:30:52.855997 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ae68b914-c2d3-4df9-bd3c-563524bb9ded-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-t25h9\" (UID: \"ae68b914-c2d3-4df9-bd3c-563524bb9ded\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t25h9" Nov 21 19:30:52 crc kubenswrapper[4701]: I1121 19:30:52.856140 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ae68b914-c2d3-4df9-bd3c-563524bb9ded-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-t25h9\" (UID: \"ae68b914-c2d3-4df9-bd3c-563524bb9ded\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t25h9" Nov 21 19:30:52 crc kubenswrapper[4701]: I1121 19:30:52.958670 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ck2pc\" (UniqueName: \"kubernetes.io/projected/ae68b914-c2d3-4df9-bd3c-563524bb9ded-kube-api-access-ck2pc\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-t25h9\" (UID: \"ae68b914-c2d3-4df9-bd3c-563524bb9ded\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t25h9" Nov 21 19:30:52 crc kubenswrapper[4701]: I1121 19:30:52.958809 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ae68b914-c2d3-4df9-bd3c-563524bb9ded-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-t25h9\" (UID: \"ae68b914-c2d3-4df9-bd3c-563524bb9ded\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t25h9" Nov 21 19:30:52 crc kubenswrapper[4701]: I1121 19:30:52.959007 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ae68b914-c2d3-4df9-bd3c-563524bb9ded-ssh-key\") 
pod \"configure-network-edpm-deployment-openstack-edpm-ipam-t25h9\" (UID: \"ae68b914-c2d3-4df9-bd3c-563524bb9ded\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t25h9" Nov 21 19:30:52 crc kubenswrapper[4701]: I1121 19:30:52.965040 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ae68b914-c2d3-4df9-bd3c-563524bb9ded-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-t25h9\" (UID: \"ae68b914-c2d3-4df9-bd3c-563524bb9ded\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t25h9" Nov 21 19:30:52 crc kubenswrapper[4701]: I1121 19:30:52.965229 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ae68b914-c2d3-4df9-bd3c-563524bb9ded-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-t25h9\" (UID: \"ae68b914-c2d3-4df9-bd3c-563524bb9ded\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t25h9" Nov 21 19:30:52 crc kubenswrapper[4701]: I1121 19:30:52.991310 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ck2pc\" (UniqueName: \"kubernetes.io/projected/ae68b914-c2d3-4df9-bd3c-563524bb9ded-kube-api-access-ck2pc\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-t25h9\" (UID: \"ae68b914-c2d3-4df9-bd3c-563524bb9ded\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t25h9" Nov 21 19:30:53 crc kubenswrapper[4701]: I1121 19:30:53.043821 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t25h9" Nov 21 19:30:53 crc kubenswrapper[4701]: I1121 19:30:53.480420 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t25h9"] Nov 21 19:30:53 crc kubenswrapper[4701]: I1121 19:30:53.585316 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t25h9" event={"ID":"ae68b914-c2d3-4df9-bd3c-563524bb9ded","Type":"ContainerStarted","Data":"1cddae403bc443a04f118a2d7f24fa6a7196335d2afc4624e1d9cb01c9e6a79a"} Nov 21 19:30:54 crc kubenswrapper[4701]: I1121 19:30:54.601354 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t25h9" event={"ID":"ae68b914-c2d3-4df9-bd3c-563524bb9ded","Type":"ContainerStarted","Data":"a716ae622df53f50a7372c8620ab451a176d2b56cffa5ee8081a906e93462640"} Nov 21 19:30:54 crc kubenswrapper[4701]: I1121 19:30:54.631472 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t25h9" podStartSLOduration=2.144443249 podStartE2EDuration="2.6314361s" podCreationTimestamp="2025-11-21 19:30:52 +0000 UTC" firstStartedPulling="2025-11-21 19:30:53.486535272 +0000 UTC m=+1744.271675309" lastFinishedPulling="2025-11-21 19:30:53.973528093 +0000 UTC m=+1744.758668160" observedRunningTime="2025-11-21 19:30:54.630335601 +0000 UTC m=+1745.415475668" watchObservedRunningTime="2025-11-21 19:30:54.6314361 +0000 UTC m=+1745.416576167" Nov 21 19:31:00 crc kubenswrapper[4701]: I1121 19:31:00.951512 4701 scope.go:117] "RemoveContainer" containerID="081447c4e1559ffc58e33e136a78fa7b343ef9791f855a491b27a72a49d8cde6" Nov 21 19:31:00 crc kubenswrapper[4701]: E1121 19:31:00.954751 4701 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:31:08 crc kubenswrapper[4701]: I1121 19:31:08.112370 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-0ce8-account-create-pzhxk"] Nov 21 19:31:08 crc kubenswrapper[4701]: I1121 19:31:08.132814 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-0ce8-account-create-pzhxk"] Nov 21 19:31:08 crc kubenswrapper[4701]: I1121 19:31:08.924837 4701 scope.go:117] "RemoveContainer" containerID="7b177444e98fadf0cc0025209b7d78991c3bced6b8dad01552c748c345076aa4" Nov 21 19:31:09 crc kubenswrapper[4701]: I1121 19:31:09.027810 4701 scope.go:117] "RemoveContainer" containerID="d7629fa548c4c840a30c8117231ecfbe33d71e948b82c9e7e915673b9ce0fd48" Nov 21 19:31:09 crc kubenswrapper[4701]: I1121 19:31:09.073720 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-d84d-account-create-nx659"] Nov 21 19:31:09 crc kubenswrapper[4701]: I1121 19:31:09.092447 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-d84d-account-create-nx659"] Nov 21 19:31:09 crc kubenswrapper[4701]: I1121 19:31:09.101674 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-xcwpl"] Nov 21 19:31:09 crc kubenswrapper[4701]: I1121 19:31:09.106772 4701 scope.go:117] "RemoveContainer" containerID="43627ff489f97e02acf7393717d1fff2e12c77e2238e7eabf64abba14d9a3ff8" Nov 21 19:31:09 crc kubenswrapper[4701]: I1121 19:31:09.108053 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-054e-account-create-4x7l8"] Nov 21 19:31:09 crc kubenswrapper[4701]: I1121 19:31:09.115603 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-g5rt5"] Nov 21 19:31:09 crc kubenswrapper[4701]: I1121 19:31:09.123277 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-g5rt5"] Nov 21 19:31:09 crc kubenswrapper[4701]: I1121 19:31:09.134869 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-xcwpl"] Nov 21 19:31:09 crc kubenswrapper[4701]: I1121 19:31:09.144551 4701 scope.go:117] "RemoveContainer" containerID="b698f1c37dc783e724d65f9ec1082a2372bce8259a478087fe49beb91cbea405" Nov 21 19:31:09 crc kubenswrapper[4701]: I1121 19:31:09.145098 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-xvfz2"] Nov 21 19:31:09 crc kubenswrapper[4701]: I1121 19:31:09.153015 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-054e-account-create-4x7l8"] Nov 21 19:31:09 crc kubenswrapper[4701]: I1121 19:31:09.160651 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-xvfz2"] Nov 21 19:31:09 crc kubenswrapper[4701]: I1121 19:31:09.211033 4701 scope.go:117] "RemoveContainer" containerID="75887c19942bf90b052d280604c77e8e44be72e03d815f7b73e82cae6d1936d6" Nov 21 19:31:09 crc kubenswrapper[4701]: I1121 19:31:09.973118 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0eb288c4-5e71-4918-b69c-918bc5fa4bee" path="/var/lib/kubelet/pods/0eb288c4-5e71-4918-b69c-918bc5fa4bee/volumes" Nov 21 19:31:09 crc 
kubenswrapper[4701]: I1121 19:31:09.974449 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="36850367-a3b5-4f05-9ac6-223e900ab01e" path="/var/lib/kubelet/pods/36850367-a3b5-4f05-9ac6-223e900ab01e/volumes" Nov 21 19:31:09 crc kubenswrapper[4701]: I1121 19:31:09.975751 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="924bddf4-8ec7-4d35-b3ec-1bf4ff8b5502" path="/var/lib/kubelet/pods/924bddf4-8ec7-4d35-b3ec-1bf4ff8b5502/volumes" Nov 21 19:31:09 crc kubenswrapper[4701]: I1121 19:31:09.976875 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7b15c44-532c-4df9-aaec-d0fee8594570" path="/var/lib/kubelet/pods/a7b15c44-532c-4df9-aaec-d0fee8594570/volumes" Nov 21 19:31:09 crc kubenswrapper[4701]: I1121 19:31:09.979027 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b9e5386f-281c-4a6a-bbe4-0b1f15b82869" path="/var/lib/kubelet/pods/b9e5386f-281c-4a6a-bbe4-0b1f15b82869/volumes" Nov 21 19:31:09 crc kubenswrapper[4701]: I1121 19:31:09.980266 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf31154f-0e27-4b6c-889c-608d4a6aaf41" path="/var/lib/kubelet/pods/cf31154f-0e27-4b6c-889c-608d4a6aaf41/volumes" Nov 21 19:31:14 crc kubenswrapper[4701]: I1121 19:31:14.953079 4701 scope.go:117] "RemoveContainer" containerID="081447c4e1559ffc58e33e136a78fa7b343ef9791f855a491b27a72a49d8cde6" Nov 21 19:31:14 crc kubenswrapper[4701]: E1121 19:31:14.954697 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:31:29 crc kubenswrapper[4701]: I1121 19:31:29.960226 4701 scope.go:117] "RemoveContainer" containerID="081447c4e1559ffc58e33e136a78fa7b343ef9791f855a491b27a72a49d8cde6" Nov 21 19:31:29 crc kubenswrapper[4701]: E1121 19:31:29.961280 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:31:42 crc kubenswrapper[4701]: I1121 19:31:42.951248 4701 scope.go:117] "RemoveContainer" containerID="081447c4e1559ffc58e33e136a78fa7b343ef9791f855a491b27a72a49d8cde6" Nov 21 19:31:42 crc kubenswrapper[4701]: E1121 19:31:42.952524 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:31:43 crc kubenswrapper[4701]: I1121 19:31:43.065005 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-wtbqn"] Nov 21 19:31:43 crc kubenswrapper[4701]: I1121 19:31:43.082727 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/nova-cell0-conductor-db-sync-wtbqn"] Nov 21 19:31:43 crc kubenswrapper[4701]: I1121 19:31:43.973022 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="63425ae3-f835-4767-b6dd-f5519aa2b0ae" path="/var/lib/kubelet/pods/63425ae3-f835-4767-b6dd-f5519aa2b0ae/volumes" Nov 21 19:31:57 crc kubenswrapper[4701]: I1121 19:31:57.953243 4701 scope.go:117] "RemoveContainer" containerID="081447c4e1559ffc58e33e136a78fa7b343ef9791f855a491b27a72a49d8cde6" Nov 21 19:31:57 crc kubenswrapper[4701]: E1121 19:31:57.955277 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:32:04 crc kubenswrapper[4701]: I1121 19:32:04.099344 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-h694n"] Nov 21 19:32:04 crc kubenswrapper[4701]: I1121 19:32:04.111484 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-h694n"] Nov 21 19:32:05 crc kubenswrapper[4701]: I1121 19:32:05.966774 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1554914a-f5b4-46aa-90a4-a9c07bdd6e53" path="/var/lib/kubelet/pods/1554914a-f5b4-46aa-90a4-a9c07bdd6e53/volumes" Nov 21 19:32:09 crc kubenswrapper[4701]: I1121 19:32:09.047470 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-r4vt6"] Nov 21 19:32:09 crc kubenswrapper[4701]: I1121 19:32:09.057547 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-r4vt6"] Nov 21 19:32:09 crc kubenswrapper[4701]: I1121 19:32:09.397416 4701 scope.go:117] "RemoveContainer" containerID="3c57170e69e5dee3f6a3e241c55d44b8d8a5b743b6c39b137e0d778e32cf6b3e" Nov 21 19:32:09 crc kubenswrapper[4701]: I1121 19:32:09.457765 4701 scope.go:117] "RemoveContainer" containerID="e6418b9dae36b0f9223e04f84281e520b64a115451b632c0efd4cbc6f2b8aaef" Nov 21 19:32:09 crc kubenswrapper[4701]: I1121 19:32:09.540555 4701 scope.go:117] "RemoveContainer" containerID="5fd4bdb33bff2bd7058f9f93b26328f9db1b94f668e4eb96a54670221212c936" Nov 21 19:32:09 crc kubenswrapper[4701]: I1121 19:32:09.573538 4701 scope.go:117] "RemoveContainer" containerID="9cc0d069603ccce34b7334203837f5656e0905cc0da53d6ab6a2a36b91c78bbf" Nov 21 19:32:09 crc kubenswrapper[4701]: I1121 19:32:09.629094 4701 scope.go:117] "RemoveContainer" containerID="bfbae9e25027c5ee66380d2e91a70b9cc9d5c7e646c57b632dba2d73296e9ccf" Nov 21 19:32:09 crc kubenswrapper[4701]: I1121 19:32:09.681344 4701 scope.go:117] "RemoveContainer" containerID="25e04e39eda9520d9117e128c33bee39b932aae765eb512834b29569dc15a75b" Nov 21 19:32:09 crc kubenswrapper[4701]: I1121 19:32:09.746093 4701 scope.go:117] "RemoveContainer" containerID="fac783339a1ed332dab93147ff38773b5f122da9057a51acb39a36523405e0d6" Nov 21 19:32:09 crc kubenswrapper[4701]: I1121 19:32:09.770546 4701 scope.go:117] "RemoveContainer" containerID="9ea2d67465568d28afaed8a1e9f21d622ec1447f1387f2b485274e2b58b2beb5" Nov 21 19:32:09 crc kubenswrapper[4701]: I1121 19:32:09.965862 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4933c0b9-8f15-4b88-90ea-7fb26f2f4d66" 
path="/var/lib/kubelet/pods/4933c0b9-8f15-4b88-90ea-7fb26f2f4d66/volumes" Nov 21 19:32:11 crc kubenswrapper[4701]: I1121 19:32:11.952117 4701 scope.go:117] "RemoveContainer" containerID="081447c4e1559ffc58e33e136a78fa7b343ef9791f855a491b27a72a49d8cde6" Nov 21 19:32:11 crc kubenswrapper[4701]: E1121 19:32:11.952822 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:32:13 crc kubenswrapper[4701]: I1121 19:32:13.725377 4701 generic.go:334] "Generic (PLEG): container finished" podID="ae68b914-c2d3-4df9-bd3c-563524bb9ded" containerID="a716ae622df53f50a7372c8620ab451a176d2b56cffa5ee8081a906e93462640" exitCode=0 Nov 21 19:32:13 crc kubenswrapper[4701]: I1121 19:32:13.725502 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t25h9" event={"ID":"ae68b914-c2d3-4df9-bd3c-563524bb9ded","Type":"ContainerDied","Data":"a716ae622df53f50a7372c8620ab451a176d2b56cffa5ee8081a906e93462640"} Nov 21 19:32:15 crc kubenswrapper[4701]: I1121 19:32:15.233632 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t25h9" Nov 21 19:32:15 crc kubenswrapper[4701]: I1121 19:32:15.421929 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ae68b914-c2d3-4df9-bd3c-563524bb9ded-ssh-key\") pod \"ae68b914-c2d3-4df9-bd3c-563524bb9ded\" (UID: \"ae68b914-c2d3-4df9-bd3c-563524bb9ded\") " Nov 21 19:32:15 crc kubenswrapper[4701]: I1121 19:32:15.422080 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ck2pc\" (UniqueName: \"kubernetes.io/projected/ae68b914-c2d3-4df9-bd3c-563524bb9ded-kube-api-access-ck2pc\") pod \"ae68b914-c2d3-4df9-bd3c-563524bb9ded\" (UID: \"ae68b914-c2d3-4df9-bd3c-563524bb9ded\") " Nov 21 19:32:15 crc kubenswrapper[4701]: I1121 19:32:15.422473 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ae68b914-c2d3-4df9-bd3c-563524bb9ded-inventory\") pod \"ae68b914-c2d3-4df9-bd3c-563524bb9ded\" (UID: \"ae68b914-c2d3-4df9-bd3c-563524bb9ded\") " Nov 21 19:32:15 crc kubenswrapper[4701]: I1121 19:32:15.433835 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae68b914-c2d3-4df9-bd3c-563524bb9ded-kube-api-access-ck2pc" (OuterVolumeSpecName: "kube-api-access-ck2pc") pod "ae68b914-c2d3-4df9-bd3c-563524bb9ded" (UID: "ae68b914-c2d3-4df9-bd3c-563524bb9ded"). InnerVolumeSpecName "kube-api-access-ck2pc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:32:15 crc kubenswrapper[4701]: I1121 19:32:15.471853 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae68b914-c2d3-4df9-bd3c-563524bb9ded-inventory" (OuterVolumeSpecName: "inventory") pod "ae68b914-c2d3-4df9-bd3c-563524bb9ded" (UID: "ae68b914-c2d3-4df9-bd3c-563524bb9ded"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:32:15 crc kubenswrapper[4701]: I1121 19:32:15.473920 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae68b914-c2d3-4df9-bd3c-563524bb9ded-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ae68b914-c2d3-4df9-bd3c-563524bb9ded" (UID: "ae68b914-c2d3-4df9-bd3c-563524bb9ded"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:32:15 crc kubenswrapper[4701]: I1121 19:32:15.526018 4701 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ae68b914-c2d3-4df9-bd3c-563524bb9ded-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 19:32:15 crc kubenswrapper[4701]: I1121 19:32:15.526062 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ck2pc\" (UniqueName: \"kubernetes.io/projected/ae68b914-c2d3-4df9-bd3c-563524bb9ded-kube-api-access-ck2pc\") on node \"crc\" DevicePath \"\"" Nov 21 19:32:15 crc kubenswrapper[4701]: I1121 19:32:15.526083 4701 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ae68b914-c2d3-4df9-bd3c-563524bb9ded-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 19:32:15 crc kubenswrapper[4701]: I1121 19:32:15.759757 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t25h9" event={"ID":"ae68b914-c2d3-4df9-bd3c-563524bb9ded","Type":"ContainerDied","Data":"1cddae403bc443a04f118a2d7f24fa6a7196335d2afc4624e1d9cb01c9e6a79a"} Nov 21 19:32:15 crc kubenswrapper[4701]: I1121 19:32:15.759903 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1cddae403bc443a04f118a2d7f24fa6a7196335d2afc4624e1d9cb01c9e6a79a" Nov 21 19:32:15 crc kubenswrapper[4701]: I1121 19:32:15.759927 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t25h9" Nov 21 19:32:15 crc kubenswrapper[4701]: I1121 19:32:15.904842 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rztzd"] Nov 21 19:32:15 crc kubenswrapper[4701]: E1121 19:32:15.905542 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae68b914-c2d3-4df9-bd3c-563524bb9ded" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 21 19:32:15 crc kubenswrapper[4701]: I1121 19:32:15.905571 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae68b914-c2d3-4df9-bd3c-563524bb9ded" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 21 19:32:15 crc kubenswrapper[4701]: I1121 19:32:15.905909 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae68b914-c2d3-4df9-bd3c-563524bb9ded" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 21 19:32:15 crc kubenswrapper[4701]: I1121 19:32:15.906837 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rztzd" Nov 21 19:32:15 crc kubenswrapper[4701]: I1121 19:32:15.909526 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 19:32:15 crc kubenswrapper[4701]: I1121 19:32:15.910579 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 19:32:15 crc kubenswrapper[4701]: I1121 19:32:15.910840 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hqsfp" Nov 21 19:32:15 crc kubenswrapper[4701]: I1121 19:32:15.913059 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 19:32:15 crc kubenswrapper[4701]: I1121 19:32:15.931300 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rztzd"] Nov 21 19:32:16 crc kubenswrapper[4701]: I1121 19:32:16.044759 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-rztzd\" (UID: \"2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rztzd" Nov 21 19:32:16 crc kubenswrapper[4701]: I1121 19:32:16.048448 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9dmt\" (UniqueName: \"kubernetes.io/projected/2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a-kube-api-access-p9dmt\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-rztzd\" (UID: \"2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rztzd" Nov 21 19:32:16 crc kubenswrapper[4701]: I1121 19:32:16.049402 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-rztzd\" (UID: \"2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rztzd" Nov 21 19:32:16 crc kubenswrapper[4701]: I1121 19:32:16.153798 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-rztzd\" (UID: \"2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rztzd" Nov 21 19:32:16 crc kubenswrapper[4701]: I1121 19:32:16.153947 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9dmt\" (UniqueName: \"kubernetes.io/projected/2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a-kube-api-access-p9dmt\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-rztzd\" (UID: \"2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rztzd" Nov 21 19:32:16 crc kubenswrapper[4701]: I1121 19:32:16.154111 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a-ssh-key\") pod 
\"validate-network-edpm-deployment-openstack-edpm-ipam-rztzd\" (UID: \"2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rztzd" Nov 21 19:32:16 crc kubenswrapper[4701]: I1121 19:32:16.160873 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-rztzd\" (UID: \"2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rztzd" Nov 21 19:32:16 crc kubenswrapper[4701]: I1121 19:32:16.162429 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-rztzd\" (UID: \"2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rztzd" Nov 21 19:32:16 crc kubenswrapper[4701]: I1121 19:32:16.176177 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p9dmt\" (UniqueName: \"kubernetes.io/projected/2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a-kube-api-access-p9dmt\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-rztzd\" (UID: \"2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rztzd" Nov 21 19:32:16 crc kubenswrapper[4701]: I1121 19:32:16.242792 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rztzd" Nov 21 19:32:16 crc kubenswrapper[4701]: I1121 19:32:16.931014 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rztzd"] Nov 21 19:32:17 crc kubenswrapper[4701]: I1121 19:32:17.788178 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rztzd" event={"ID":"2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a","Type":"ContainerStarted","Data":"0456c3aa87c1a31c71adb1d20bb1b3383030d63745acef9baa9182020fa7564b"} Nov 21 19:32:17 crc kubenswrapper[4701]: I1121 19:32:17.789424 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rztzd" event={"ID":"2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a","Type":"ContainerStarted","Data":"36acbc9f1ad0625bd1978771ae0546def9cade39a3031aa473769089eb2b1b18"} Nov 21 19:32:17 crc kubenswrapper[4701]: I1121 19:32:17.822496 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rztzd" podStartSLOduration=2.333352453 podStartE2EDuration="2.82245265s" podCreationTimestamp="2025-11-21 19:32:15 +0000 UTC" firstStartedPulling="2025-11-21 19:32:16.93296616 +0000 UTC m=+1827.718106217" lastFinishedPulling="2025-11-21 19:32:17.422066387 +0000 UTC m=+1828.207206414" observedRunningTime="2025-11-21 19:32:17.812533658 +0000 UTC m=+1828.597673725" watchObservedRunningTime="2025-11-21 19:32:17.82245265 +0000 UTC m=+1828.607592707" Nov 21 19:32:25 crc kubenswrapper[4701]: I1121 19:32:25.110405 4701 generic.go:334] "Generic (PLEG): container finished" podID="2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a" containerID="0456c3aa87c1a31c71adb1d20bb1b3383030d63745acef9baa9182020fa7564b" exitCode=0 Nov 21 19:32:25 crc kubenswrapper[4701]: I1121 
19:32:25.110505 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rztzd" event={"ID":"2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a","Type":"ContainerDied","Data":"0456c3aa87c1a31c71adb1d20bb1b3383030d63745acef9baa9182020fa7564b"} Nov 21 19:32:25 crc kubenswrapper[4701]: I1121 19:32:25.952242 4701 scope.go:117] "RemoveContainer" containerID="081447c4e1559ffc58e33e136a78fa7b343ef9791f855a491b27a72a49d8cde6" Nov 21 19:32:25 crc kubenswrapper[4701]: E1121 19:32:25.953057 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:32:26 crc kubenswrapper[4701]: I1121 19:32:26.692326 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rztzd" Nov 21 19:32:26 crc kubenswrapper[4701]: I1121 19:32:26.733299 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a-inventory\") pod \"2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a\" (UID: \"2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a\") " Nov 21 19:32:26 crc kubenswrapper[4701]: I1121 19:32:26.733822 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p9dmt\" (UniqueName: \"kubernetes.io/projected/2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a-kube-api-access-p9dmt\") pod \"2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a\" (UID: \"2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a\") " Nov 21 19:32:26 crc kubenswrapper[4701]: I1121 19:32:26.734024 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a-ssh-key\") pod \"2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a\" (UID: \"2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a\") " Nov 21 19:32:26 crc kubenswrapper[4701]: I1121 19:32:26.741964 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a-kube-api-access-p9dmt" (OuterVolumeSpecName: "kube-api-access-p9dmt") pod "2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a" (UID: "2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a"). InnerVolumeSpecName "kube-api-access-p9dmt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:32:26 crc kubenswrapper[4701]: I1121 19:32:26.778566 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a-inventory" (OuterVolumeSpecName: "inventory") pod "2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a" (UID: "2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:32:26 crc kubenswrapper[4701]: I1121 19:32:26.782356 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a" (UID: "2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:32:26 crc kubenswrapper[4701]: I1121 19:32:26.838443 4701 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 19:32:26 crc kubenswrapper[4701]: I1121 19:32:26.838511 4701 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 19:32:26 crc kubenswrapper[4701]: I1121 19:32:26.838532 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p9dmt\" (UniqueName: \"kubernetes.io/projected/2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a-kube-api-access-p9dmt\") on node \"crc\" DevicePath \"\"" Nov 21 19:32:27 crc kubenswrapper[4701]: I1121 19:32:27.139081 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rztzd" event={"ID":"2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a","Type":"ContainerDied","Data":"36acbc9f1ad0625bd1978771ae0546def9cade39a3031aa473769089eb2b1b18"} Nov 21 19:32:27 crc kubenswrapper[4701]: I1121 19:32:27.139125 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="36acbc9f1ad0625bd1978771ae0546def9cade39a3031aa473769089eb2b1b18" Nov 21 19:32:27 crc kubenswrapper[4701]: I1121 19:32:27.139152 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rztzd" Nov 21 19:32:27 crc kubenswrapper[4701]: I1121 19:32:27.234283 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-wtbcx"] Nov 21 19:32:27 crc kubenswrapper[4701]: E1121 19:32:27.234906 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 21 19:32:27 crc kubenswrapper[4701]: I1121 19:32:27.234936 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 21 19:32:27 crc kubenswrapper[4701]: I1121 19:32:27.235190 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 21 19:32:27 crc kubenswrapper[4701]: I1121 19:32:27.236324 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-wtbcx" Nov 21 19:32:27 crc kubenswrapper[4701]: I1121 19:32:27.244608 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/40c71638-2add-4f3c-acc9-cc971cad107e-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-wtbcx\" (UID: \"40c71638-2add-4f3c-acc9-cc971cad107e\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-wtbcx" Nov 21 19:32:27 crc kubenswrapper[4701]: I1121 19:32:27.244926 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vlpq7\" (UniqueName: \"kubernetes.io/projected/40c71638-2add-4f3c-acc9-cc971cad107e-kube-api-access-vlpq7\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-wtbcx\" (UID: \"40c71638-2add-4f3c-acc9-cc971cad107e\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-wtbcx" Nov 21 19:32:27 crc kubenswrapper[4701]: I1121 19:32:27.245163 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/40c71638-2add-4f3c-acc9-cc971cad107e-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-wtbcx\" (UID: \"40c71638-2add-4f3c-acc9-cc971cad107e\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-wtbcx" Nov 21 19:32:27 crc kubenswrapper[4701]: I1121 19:32:27.246672 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 19:32:27 crc kubenswrapper[4701]: I1121 19:32:27.247074 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 19:32:27 crc kubenswrapper[4701]: I1121 19:32:27.247269 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 19:32:27 crc kubenswrapper[4701]: I1121 19:32:27.247431 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hqsfp" Nov 21 19:32:27 crc kubenswrapper[4701]: I1121 19:32:27.264096 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-wtbcx"] Nov 21 19:32:27 crc kubenswrapper[4701]: I1121 19:32:27.347709 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/40c71638-2add-4f3c-acc9-cc971cad107e-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-wtbcx\" (UID: \"40c71638-2add-4f3c-acc9-cc971cad107e\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-wtbcx" Nov 21 19:32:27 crc kubenswrapper[4701]: I1121 19:32:27.348098 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/40c71638-2add-4f3c-acc9-cc971cad107e-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-wtbcx\" (UID: \"40c71638-2add-4f3c-acc9-cc971cad107e\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-wtbcx" Nov 21 19:32:27 crc kubenswrapper[4701]: I1121 19:32:27.348218 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vlpq7\" (UniqueName: \"kubernetes.io/projected/40c71638-2add-4f3c-acc9-cc971cad107e-kube-api-access-vlpq7\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-wtbcx\" (UID: 
\"40c71638-2add-4f3c-acc9-cc971cad107e\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-wtbcx" Nov 21 19:32:27 crc kubenswrapper[4701]: I1121 19:32:27.354583 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/40c71638-2add-4f3c-acc9-cc971cad107e-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-wtbcx\" (UID: \"40c71638-2add-4f3c-acc9-cc971cad107e\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-wtbcx" Nov 21 19:32:27 crc kubenswrapper[4701]: I1121 19:32:27.357489 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/40c71638-2add-4f3c-acc9-cc971cad107e-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-wtbcx\" (UID: \"40c71638-2add-4f3c-acc9-cc971cad107e\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-wtbcx" Nov 21 19:32:27 crc kubenswrapper[4701]: I1121 19:32:27.373030 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vlpq7\" (UniqueName: \"kubernetes.io/projected/40c71638-2add-4f3c-acc9-cc971cad107e-kube-api-access-vlpq7\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-wtbcx\" (UID: \"40c71638-2add-4f3c-acc9-cc971cad107e\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-wtbcx" Nov 21 19:32:27 crc kubenswrapper[4701]: I1121 19:32:27.560275 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-wtbcx" Nov 21 19:32:28 crc kubenswrapper[4701]: I1121 19:32:28.224498 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-wtbcx"] Nov 21 19:32:28 crc kubenswrapper[4701]: W1121 19:32:28.235948 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod40c71638_2add_4f3c_acc9_cc971cad107e.slice/crio-8dca17d8608bae9dc8799c954557638dea777235d39d3dbef3ff738f353e5e2e WatchSource:0}: Error finding container 8dca17d8608bae9dc8799c954557638dea777235d39d3dbef3ff738f353e5e2e: Status 404 returned error can't find the container with id 8dca17d8608bae9dc8799c954557638dea777235d39d3dbef3ff738f353e5e2e Nov 21 19:32:29 crc kubenswrapper[4701]: I1121 19:32:29.168523 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-wtbcx" event={"ID":"40c71638-2add-4f3c-acc9-cc971cad107e","Type":"ContainerStarted","Data":"37ccf7ad39386d429522128396a33029080cec3432e37d3afd5cb8f500eac3b8"} Nov 21 19:32:29 crc kubenswrapper[4701]: I1121 19:32:29.168996 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-wtbcx" event={"ID":"40c71638-2add-4f3c-acc9-cc971cad107e","Type":"ContainerStarted","Data":"8dca17d8608bae9dc8799c954557638dea777235d39d3dbef3ff738f353e5e2e"} Nov 21 19:32:29 crc kubenswrapper[4701]: I1121 19:32:29.196410 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-wtbcx" podStartSLOduration=1.7818296359999999 podStartE2EDuration="2.196380587s" podCreationTimestamp="2025-11-21 19:32:27 +0000 UTC" firstStartedPulling="2025-11-21 19:32:28.239633502 +0000 UTC m=+1839.024773569" lastFinishedPulling="2025-11-21 19:32:28.654184493 +0000 UTC m=+1839.439324520" observedRunningTime="2025-11-21 19:32:29.19503226 +0000 UTC 
m=+1839.980172317" watchObservedRunningTime="2025-11-21 19:32:29.196380587 +0000 UTC m=+1839.981520654" Nov 21 19:32:36 crc kubenswrapper[4701]: I1121 19:32:36.953800 4701 scope.go:117] "RemoveContainer" containerID="081447c4e1559ffc58e33e136a78fa7b343ef9791f855a491b27a72a49d8cde6" Nov 21 19:32:36 crc kubenswrapper[4701]: E1121 19:32:36.955525 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:32:47 crc kubenswrapper[4701]: I1121 19:32:47.952754 4701 scope.go:117] "RemoveContainer" containerID="081447c4e1559ffc58e33e136a78fa7b343ef9791f855a491b27a72a49d8cde6" Nov 21 19:32:47 crc kubenswrapper[4701]: E1121 19:32:47.954251 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:32:49 crc kubenswrapper[4701]: I1121 19:32:49.081318 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-x5x89"] Nov 21 19:32:49 crc kubenswrapper[4701]: I1121 19:32:49.102279 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-x5x89"] Nov 21 19:32:49 crc kubenswrapper[4701]: I1121 19:32:49.967915 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f37895e6-74f4-4d54-a0b8-232c5ba5c018" path="/var/lib/kubelet/pods/f37895e6-74f4-4d54-a0b8-232c5ba5c018/volumes" Nov 21 19:33:02 crc kubenswrapper[4701]: I1121 19:33:02.951363 4701 scope.go:117] "RemoveContainer" containerID="081447c4e1559ffc58e33e136a78fa7b343ef9791f855a491b27a72a49d8cde6" Nov 21 19:33:03 crc kubenswrapper[4701]: I1121 19:33:03.589436 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerStarted","Data":"7fbc56daee349295117a0bd71dcbc8f65f0cba0ed64444e6d442311c3039877d"} Nov 21 19:33:09 crc kubenswrapper[4701]: I1121 19:33:09.954401 4701 scope.go:117] "RemoveContainer" containerID="5ba58a1d76cee00b4c33d1041d44aefbe1a1a858ab8ef46e4371df68016be7cc" Nov 21 19:33:10 crc kubenswrapper[4701]: I1121 19:33:10.026184 4701 scope.go:117] "RemoveContainer" containerID="ca6070a3011b2d28c3839bc8413f71f64dfb43f29762867e4ce4f2321913c59d" Nov 21 19:33:16 crc kubenswrapper[4701]: I1121 19:33:16.799053 4701 generic.go:334] "Generic (PLEG): container finished" podID="40c71638-2add-4f3c-acc9-cc971cad107e" containerID="37ccf7ad39386d429522128396a33029080cec3432e37d3afd5cb8f500eac3b8" exitCode=0 Nov 21 19:33:16 crc kubenswrapper[4701]: I1121 19:33:16.799142 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-wtbcx" event={"ID":"40c71638-2add-4f3c-acc9-cc971cad107e","Type":"ContainerDied","Data":"37ccf7ad39386d429522128396a33029080cec3432e37d3afd5cb8f500eac3b8"} Nov 21 19:33:18 crc kubenswrapper[4701]: I1121 
19:33:18.398436 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-wtbcx" Nov 21 19:33:18 crc kubenswrapper[4701]: I1121 19:33:18.538630 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vlpq7\" (UniqueName: \"kubernetes.io/projected/40c71638-2add-4f3c-acc9-cc971cad107e-kube-api-access-vlpq7\") pod \"40c71638-2add-4f3c-acc9-cc971cad107e\" (UID: \"40c71638-2add-4f3c-acc9-cc971cad107e\") " Nov 21 19:33:18 crc kubenswrapper[4701]: I1121 19:33:18.539474 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/40c71638-2add-4f3c-acc9-cc971cad107e-inventory\") pod \"40c71638-2add-4f3c-acc9-cc971cad107e\" (UID: \"40c71638-2add-4f3c-acc9-cc971cad107e\") " Nov 21 19:33:18 crc kubenswrapper[4701]: I1121 19:33:18.540007 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/40c71638-2add-4f3c-acc9-cc971cad107e-ssh-key\") pod \"40c71638-2add-4f3c-acc9-cc971cad107e\" (UID: \"40c71638-2add-4f3c-acc9-cc971cad107e\") " Nov 21 19:33:18 crc kubenswrapper[4701]: I1121 19:33:18.548583 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40c71638-2add-4f3c-acc9-cc971cad107e-kube-api-access-vlpq7" (OuterVolumeSpecName: "kube-api-access-vlpq7") pod "40c71638-2add-4f3c-acc9-cc971cad107e" (UID: "40c71638-2add-4f3c-acc9-cc971cad107e"). InnerVolumeSpecName "kube-api-access-vlpq7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:33:18 crc kubenswrapper[4701]: I1121 19:33:18.596612 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40c71638-2add-4f3c-acc9-cc971cad107e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "40c71638-2add-4f3c-acc9-cc971cad107e" (UID: "40c71638-2add-4f3c-acc9-cc971cad107e"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:33:18 crc kubenswrapper[4701]: I1121 19:33:18.599934 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40c71638-2add-4f3c-acc9-cc971cad107e-inventory" (OuterVolumeSpecName: "inventory") pod "40c71638-2add-4f3c-acc9-cc971cad107e" (UID: "40c71638-2add-4f3c-acc9-cc971cad107e"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:33:18 crc kubenswrapper[4701]: I1121 19:33:18.643675 4701 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/40c71638-2add-4f3c-acc9-cc971cad107e-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 19:33:18 crc kubenswrapper[4701]: I1121 19:33:18.643721 4701 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/40c71638-2add-4f3c-acc9-cc971cad107e-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 19:33:18 crc kubenswrapper[4701]: I1121 19:33:18.643737 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vlpq7\" (UniqueName: \"kubernetes.io/projected/40c71638-2add-4f3c-acc9-cc971cad107e-kube-api-access-vlpq7\") on node \"crc\" DevicePath \"\"" Nov 21 19:33:18 crc kubenswrapper[4701]: I1121 19:33:18.836879 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-wtbcx" event={"ID":"40c71638-2add-4f3c-acc9-cc971cad107e","Type":"ContainerDied","Data":"8dca17d8608bae9dc8799c954557638dea777235d39d3dbef3ff738f353e5e2e"} Nov 21 19:33:18 crc kubenswrapper[4701]: I1121 19:33:18.838473 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8dca17d8608bae9dc8799c954557638dea777235d39d3dbef3ff738f353e5e2e" Nov 21 19:33:18 crc kubenswrapper[4701]: I1121 19:33:18.836942 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-wtbcx" Nov 21 19:33:18 crc kubenswrapper[4701]: I1121 19:33:18.949069 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lcdx6"] Nov 21 19:33:18 crc kubenswrapper[4701]: E1121 19:33:18.949737 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40c71638-2add-4f3c-acc9-cc971cad107e" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 21 19:33:18 crc kubenswrapper[4701]: I1121 19:33:18.949769 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="40c71638-2add-4f3c-acc9-cc971cad107e" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 21 19:33:18 crc kubenswrapper[4701]: I1121 19:33:18.950078 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="40c71638-2add-4f3c-acc9-cc971cad107e" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 21 19:33:18 crc kubenswrapper[4701]: I1121 19:33:18.951020 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lcdx6" Nov 21 19:33:18 crc kubenswrapper[4701]: I1121 19:33:18.955456 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 19:33:18 crc kubenswrapper[4701]: I1121 19:33:18.955566 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hqsfp" Nov 21 19:33:18 crc kubenswrapper[4701]: I1121 19:33:18.955588 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 19:33:18 crc kubenswrapper[4701]: I1121 19:33:18.956657 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 19:33:18 crc kubenswrapper[4701]: I1121 19:33:18.973700 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lcdx6"] Nov 21 19:33:19 crc kubenswrapper[4701]: I1121 19:33:19.054591 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e275d37d-b55f-433f-b4be-cfba6b7b158e-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-lcdx6\" (UID: \"e275d37d-b55f-433f-b4be-cfba6b7b158e\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lcdx6" Nov 21 19:33:19 crc kubenswrapper[4701]: I1121 19:33:19.054747 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5zjhs\" (UniqueName: \"kubernetes.io/projected/e275d37d-b55f-433f-b4be-cfba6b7b158e-kube-api-access-5zjhs\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-lcdx6\" (UID: \"e275d37d-b55f-433f-b4be-cfba6b7b158e\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lcdx6" Nov 21 19:33:19 crc kubenswrapper[4701]: I1121 19:33:19.054779 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e275d37d-b55f-433f-b4be-cfba6b7b158e-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-lcdx6\" (UID: \"e275d37d-b55f-433f-b4be-cfba6b7b158e\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lcdx6" Nov 21 19:33:19 crc kubenswrapper[4701]: I1121 19:33:19.158040 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5zjhs\" (UniqueName: \"kubernetes.io/projected/e275d37d-b55f-433f-b4be-cfba6b7b158e-kube-api-access-5zjhs\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-lcdx6\" (UID: \"e275d37d-b55f-433f-b4be-cfba6b7b158e\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lcdx6" Nov 21 19:33:19 crc kubenswrapper[4701]: I1121 19:33:19.158102 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e275d37d-b55f-433f-b4be-cfba6b7b158e-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-lcdx6\" (UID: \"e275d37d-b55f-433f-b4be-cfba6b7b158e\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lcdx6" Nov 21 19:33:19 crc kubenswrapper[4701]: I1121 19:33:19.158241 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e275d37d-b55f-433f-b4be-cfba6b7b158e-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-lcdx6\" 
(UID: \"e275d37d-b55f-433f-b4be-cfba6b7b158e\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lcdx6" Nov 21 19:33:19 crc kubenswrapper[4701]: I1121 19:33:19.163211 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e275d37d-b55f-433f-b4be-cfba6b7b158e-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-lcdx6\" (UID: \"e275d37d-b55f-433f-b4be-cfba6b7b158e\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lcdx6" Nov 21 19:33:19 crc kubenswrapper[4701]: I1121 19:33:19.163306 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e275d37d-b55f-433f-b4be-cfba6b7b158e-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-lcdx6\" (UID: \"e275d37d-b55f-433f-b4be-cfba6b7b158e\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lcdx6" Nov 21 19:33:19 crc kubenswrapper[4701]: I1121 19:33:19.180728 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5zjhs\" (UniqueName: \"kubernetes.io/projected/e275d37d-b55f-433f-b4be-cfba6b7b158e-kube-api-access-5zjhs\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-lcdx6\" (UID: \"e275d37d-b55f-433f-b4be-cfba6b7b158e\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lcdx6" Nov 21 19:33:19 crc kubenswrapper[4701]: I1121 19:33:19.270877 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lcdx6" Nov 21 19:33:19 crc kubenswrapper[4701]: I1121 19:33:19.916830 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lcdx6"] Nov 21 19:33:19 crc kubenswrapper[4701]: W1121 19:33:19.918052 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode275d37d_b55f_433f_b4be_cfba6b7b158e.slice/crio-960733681d32e7749b2414eac4040b1de0cd8760c0026208cf01229727c642ed WatchSource:0}: Error finding container 960733681d32e7749b2414eac4040b1de0cd8760c0026208cf01229727c642ed: Status 404 returned error can't find the container with id 960733681d32e7749b2414eac4040b1de0cd8760c0026208cf01229727c642ed Nov 21 19:33:19 crc kubenswrapper[4701]: I1121 19:33:19.921806 4701 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 19:33:21 crc kubenswrapper[4701]: I1121 19:33:21.752842 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lcdx6" event={"ID":"e275d37d-b55f-433f-b4be-cfba6b7b158e","Type":"ContainerStarted","Data":"045afb9a2d6b0b340ec31eec06abb47999f0d8e253e828a657455fd7df8509c1"} Nov 21 19:33:21 crc kubenswrapper[4701]: I1121 19:33:21.753753 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lcdx6" event={"ID":"e275d37d-b55f-433f-b4be-cfba6b7b158e","Type":"ContainerStarted","Data":"960733681d32e7749b2414eac4040b1de0cd8760c0026208cf01229727c642ed"} Nov 21 19:33:21 crc kubenswrapper[4701]: I1121 19:33:21.787606 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lcdx6" podStartSLOduration=3.402581027 podStartE2EDuration="3.787582137s" podCreationTimestamp="2025-11-21 19:33:18 +0000 UTC" firstStartedPulling="2025-11-21 
19:33:19.921458097 +0000 UTC m=+1890.706598134" lastFinishedPulling="2025-11-21 19:33:20.306459207 +0000 UTC m=+1891.091599244" observedRunningTime="2025-11-21 19:33:21.780316309 +0000 UTC m=+1892.565456336" watchObservedRunningTime="2025-11-21 19:33:21.787582137 +0000 UTC m=+1892.572722164" Nov 21 19:34:27 crc kubenswrapper[4701]: I1121 19:34:27.577941 4701 generic.go:334] "Generic (PLEG): container finished" podID="e275d37d-b55f-433f-b4be-cfba6b7b158e" containerID="045afb9a2d6b0b340ec31eec06abb47999f0d8e253e828a657455fd7df8509c1" exitCode=0 Nov 21 19:34:27 crc kubenswrapper[4701]: I1121 19:34:27.578024 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lcdx6" event={"ID":"e275d37d-b55f-433f-b4be-cfba6b7b158e","Type":"ContainerDied","Data":"045afb9a2d6b0b340ec31eec06abb47999f0d8e253e828a657455fd7df8509c1"} Nov 21 19:34:29 crc kubenswrapper[4701]: I1121 19:34:29.107411 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lcdx6" Nov 21 19:34:29 crc kubenswrapper[4701]: I1121 19:34:29.209290 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e275d37d-b55f-433f-b4be-cfba6b7b158e-inventory\") pod \"e275d37d-b55f-433f-b4be-cfba6b7b158e\" (UID: \"e275d37d-b55f-433f-b4be-cfba6b7b158e\") " Nov 21 19:34:29 crc kubenswrapper[4701]: I1121 19:34:29.209525 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5zjhs\" (UniqueName: \"kubernetes.io/projected/e275d37d-b55f-433f-b4be-cfba6b7b158e-kube-api-access-5zjhs\") pod \"e275d37d-b55f-433f-b4be-cfba6b7b158e\" (UID: \"e275d37d-b55f-433f-b4be-cfba6b7b158e\") " Nov 21 19:34:29 crc kubenswrapper[4701]: I1121 19:34:29.209819 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e275d37d-b55f-433f-b4be-cfba6b7b158e-ssh-key\") pod \"e275d37d-b55f-433f-b4be-cfba6b7b158e\" (UID: \"e275d37d-b55f-433f-b4be-cfba6b7b158e\") " Nov 21 19:34:29 crc kubenswrapper[4701]: I1121 19:34:29.218752 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e275d37d-b55f-433f-b4be-cfba6b7b158e-kube-api-access-5zjhs" (OuterVolumeSpecName: "kube-api-access-5zjhs") pod "e275d37d-b55f-433f-b4be-cfba6b7b158e" (UID: "e275d37d-b55f-433f-b4be-cfba6b7b158e"). InnerVolumeSpecName "kube-api-access-5zjhs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:34:29 crc kubenswrapper[4701]: I1121 19:34:29.245177 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e275d37d-b55f-433f-b4be-cfba6b7b158e-inventory" (OuterVolumeSpecName: "inventory") pod "e275d37d-b55f-433f-b4be-cfba6b7b158e" (UID: "e275d37d-b55f-433f-b4be-cfba6b7b158e"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:34:29 crc kubenswrapper[4701]: I1121 19:34:29.285123 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e275d37d-b55f-433f-b4be-cfba6b7b158e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "e275d37d-b55f-433f-b4be-cfba6b7b158e" (UID: "e275d37d-b55f-433f-b4be-cfba6b7b158e"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:34:29 crc kubenswrapper[4701]: I1121 19:34:29.314184 4701 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e275d37d-b55f-433f-b4be-cfba6b7b158e-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 19:34:29 crc kubenswrapper[4701]: I1121 19:34:29.314229 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5zjhs\" (UniqueName: \"kubernetes.io/projected/e275d37d-b55f-433f-b4be-cfba6b7b158e-kube-api-access-5zjhs\") on node \"crc\" DevicePath \"\"" Nov 21 19:34:29 crc kubenswrapper[4701]: I1121 19:34:29.314242 4701 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e275d37d-b55f-433f-b4be-cfba6b7b158e-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 19:34:29 crc kubenswrapper[4701]: I1121 19:34:29.604658 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lcdx6" event={"ID":"e275d37d-b55f-433f-b4be-cfba6b7b158e","Type":"ContainerDied","Data":"960733681d32e7749b2414eac4040b1de0cd8760c0026208cf01229727c642ed"} Nov 21 19:34:29 crc kubenswrapper[4701]: I1121 19:34:29.604708 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="960733681d32e7749b2414eac4040b1de0cd8760c0026208cf01229727c642ed" Nov 21 19:34:29 crc kubenswrapper[4701]: I1121 19:34:29.604784 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lcdx6" Nov 21 19:34:29 crc kubenswrapper[4701]: I1121 19:34:29.741446 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-mxn98"] Nov 21 19:34:29 crc kubenswrapper[4701]: E1121 19:34:29.742040 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e275d37d-b55f-433f-b4be-cfba6b7b158e" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 21 19:34:29 crc kubenswrapper[4701]: I1121 19:34:29.742069 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="e275d37d-b55f-433f-b4be-cfba6b7b158e" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 21 19:34:29 crc kubenswrapper[4701]: I1121 19:34:29.742416 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="e275d37d-b55f-433f-b4be-cfba6b7b158e" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 21 19:34:29 crc kubenswrapper[4701]: I1121 19:34:29.743580 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-mxn98" Nov 21 19:34:29 crc kubenswrapper[4701]: I1121 19:34:29.747031 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 19:34:29 crc kubenswrapper[4701]: I1121 19:34:29.747697 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 19:34:29 crc kubenswrapper[4701]: I1121 19:34:29.747970 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hqsfp" Nov 21 19:34:29 crc kubenswrapper[4701]: I1121 19:34:29.748518 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 19:34:29 crc kubenswrapper[4701]: I1121 19:34:29.757140 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-mxn98"] Nov 21 19:34:29 crc kubenswrapper[4701]: I1121 19:34:29.828524 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4pjl5\" (UniqueName: \"kubernetes.io/projected/53533f20-cd97-4dfe-a00c-e5f0e6f86403-kube-api-access-4pjl5\") pod \"ssh-known-hosts-edpm-deployment-mxn98\" (UID: \"53533f20-cd97-4dfe-a00c-e5f0e6f86403\") " pod="openstack/ssh-known-hosts-edpm-deployment-mxn98" Nov 21 19:34:29 crc kubenswrapper[4701]: I1121 19:34:29.828710 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/53533f20-cd97-4dfe-a00c-e5f0e6f86403-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-mxn98\" (UID: \"53533f20-cd97-4dfe-a00c-e5f0e6f86403\") " pod="openstack/ssh-known-hosts-edpm-deployment-mxn98" Nov 21 19:34:29 crc kubenswrapper[4701]: I1121 19:34:29.828799 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/53533f20-cd97-4dfe-a00c-e5f0e6f86403-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-mxn98\" (UID: \"53533f20-cd97-4dfe-a00c-e5f0e6f86403\") " pod="openstack/ssh-known-hosts-edpm-deployment-mxn98" Nov 21 19:34:29 crc kubenswrapper[4701]: I1121 19:34:29.931272 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/53533f20-cd97-4dfe-a00c-e5f0e6f86403-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-mxn98\" (UID: \"53533f20-cd97-4dfe-a00c-e5f0e6f86403\") " pod="openstack/ssh-known-hosts-edpm-deployment-mxn98" Nov 21 19:34:29 crc kubenswrapper[4701]: I1121 19:34:29.931400 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/53533f20-cd97-4dfe-a00c-e5f0e6f86403-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-mxn98\" (UID: \"53533f20-cd97-4dfe-a00c-e5f0e6f86403\") " pod="openstack/ssh-known-hosts-edpm-deployment-mxn98" Nov 21 19:34:29 crc kubenswrapper[4701]: I1121 19:34:29.931626 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4pjl5\" (UniqueName: \"kubernetes.io/projected/53533f20-cd97-4dfe-a00c-e5f0e6f86403-kube-api-access-4pjl5\") pod \"ssh-known-hosts-edpm-deployment-mxn98\" (UID: \"53533f20-cd97-4dfe-a00c-e5f0e6f86403\") " pod="openstack/ssh-known-hosts-edpm-deployment-mxn98" Nov 21 19:34:29 crc 
kubenswrapper[4701]: I1121 19:34:29.938452 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/53533f20-cd97-4dfe-a00c-e5f0e6f86403-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-mxn98\" (UID: \"53533f20-cd97-4dfe-a00c-e5f0e6f86403\") " pod="openstack/ssh-known-hosts-edpm-deployment-mxn98" Nov 21 19:34:29 crc kubenswrapper[4701]: I1121 19:34:29.940673 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/53533f20-cd97-4dfe-a00c-e5f0e6f86403-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-mxn98\" (UID: \"53533f20-cd97-4dfe-a00c-e5f0e6f86403\") " pod="openstack/ssh-known-hosts-edpm-deployment-mxn98" Nov 21 19:34:29 crc kubenswrapper[4701]: I1121 19:34:29.967178 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4pjl5\" (UniqueName: \"kubernetes.io/projected/53533f20-cd97-4dfe-a00c-e5f0e6f86403-kube-api-access-4pjl5\") pod \"ssh-known-hosts-edpm-deployment-mxn98\" (UID: \"53533f20-cd97-4dfe-a00c-e5f0e6f86403\") " pod="openstack/ssh-known-hosts-edpm-deployment-mxn98" Nov 21 19:34:30 crc kubenswrapper[4701]: I1121 19:34:30.065955 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-mxn98" Nov 21 19:34:30 crc kubenswrapper[4701]: I1121 19:34:30.708452 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-mxn98"] Nov 21 19:34:31 crc kubenswrapper[4701]: I1121 19:34:31.638261 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-mxn98" event={"ID":"53533f20-cd97-4dfe-a00c-e5f0e6f86403","Type":"ContainerStarted","Data":"b6932ca55ff2c4e0d5d4166eb78885706fcdef1249342ae6db60e99cc964f881"} Nov 21 19:34:31 crc kubenswrapper[4701]: I1121 19:34:31.638851 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-mxn98" event={"ID":"53533f20-cd97-4dfe-a00c-e5f0e6f86403","Type":"ContainerStarted","Data":"05d603a706eafedf8355031d3a0c4534758412c914aef396f8b91ed8a829b3ce"} Nov 21 19:34:31 crc kubenswrapper[4701]: I1121 19:34:31.675406 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-mxn98" podStartSLOduration=2.201907948 podStartE2EDuration="2.675368916s" podCreationTimestamp="2025-11-21 19:34:29 +0000 UTC" firstStartedPulling="2025-11-21 19:34:30.715928706 +0000 UTC m=+1961.501068763" lastFinishedPulling="2025-11-21 19:34:31.189389674 +0000 UTC m=+1961.974529731" observedRunningTime="2025-11-21 19:34:31.662691817 +0000 UTC m=+1962.447831874" watchObservedRunningTime="2025-11-21 19:34:31.675368916 +0000 UTC m=+1962.460508973" Nov 21 19:34:39 crc kubenswrapper[4701]: I1121 19:34:39.760956 4701 generic.go:334] "Generic (PLEG): container finished" podID="53533f20-cd97-4dfe-a00c-e5f0e6f86403" containerID="b6932ca55ff2c4e0d5d4166eb78885706fcdef1249342ae6db60e99cc964f881" exitCode=0 Nov 21 19:34:39 crc kubenswrapper[4701]: I1121 19:34:39.761047 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-mxn98" event={"ID":"53533f20-cd97-4dfe-a00c-e5f0e6f86403","Type":"ContainerDied","Data":"b6932ca55ff2c4e0d5d4166eb78885706fcdef1249342ae6db60e99cc964f881"} Nov 21 19:34:41 crc kubenswrapper[4701]: I1121 19:34:41.385565 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-mxn98" Nov 21 19:34:41 crc kubenswrapper[4701]: I1121 19:34:41.462692 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/53533f20-cd97-4dfe-a00c-e5f0e6f86403-ssh-key-openstack-edpm-ipam\") pod \"53533f20-cd97-4dfe-a00c-e5f0e6f86403\" (UID: \"53533f20-cd97-4dfe-a00c-e5f0e6f86403\") " Nov 21 19:34:41 crc kubenswrapper[4701]: I1121 19:34:41.462871 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/53533f20-cd97-4dfe-a00c-e5f0e6f86403-inventory-0\") pod \"53533f20-cd97-4dfe-a00c-e5f0e6f86403\" (UID: \"53533f20-cd97-4dfe-a00c-e5f0e6f86403\") " Nov 21 19:34:41 crc kubenswrapper[4701]: I1121 19:34:41.463000 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4pjl5\" (UniqueName: \"kubernetes.io/projected/53533f20-cd97-4dfe-a00c-e5f0e6f86403-kube-api-access-4pjl5\") pod \"53533f20-cd97-4dfe-a00c-e5f0e6f86403\" (UID: \"53533f20-cd97-4dfe-a00c-e5f0e6f86403\") " Nov 21 19:34:41 crc kubenswrapper[4701]: I1121 19:34:41.487600 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/53533f20-cd97-4dfe-a00c-e5f0e6f86403-kube-api-access-4pjl5" (OuterVolumeSpecName: "kube-api-access-4pjl5") pod "53533f20-cd97-4dfe-a00c-e5f0e6f86403" (UID: "53533f20-cd97-4dfe-a00c-e5f0e6f86403"). InnerVolumeSpecName "kube-api-access-4pjl5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:34:41 crc kubenswrapper[4701]: I1121 19:34:41.523754 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53533f20-cd97-4dfe-a00c-e5f0e6f86403-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "53533f20-cd97-4dfe-a00c-e5f0e6f86403" (UID: "53533f20-cd97-4dfe-a00c-e5f0e6f86403"). InnerVolumeSpecName "inventory-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:34:41 crc kubenswrapper[4701]: I1121 19:34:41.525623 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53533f20-cd97-4dfe-a00c-e5f0e6f86403-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "53533f20-cd97-4dfe-a00c-e5f0e6f86403" (UID: "53533f20-cd97-4dfe-a00c-e5f0e6f86403"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:34:41 crc kubenswrapper[4701]: I1121 19:34:41.567685 4701 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/53533f20-cd97-4dfe-a00c-e5f0e6f86403-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 21 19:34:41 crc kubenswrapper[4701]: I1121 19:34:41.567878 4701 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/53533f20-cd97-4dfe-a00c-e5f0e6f86403-inventory-0\") on node \"crc\" DevicePath \"\"" Nov 21 19:34:41 crc kubenswrapper[4701]: I1121 19:34:41.567969 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4pjl5\" (UniqueName: \"kubernetes.io/projected/53533f20-cd97-4dfe-a00c-e5f0e6f86403-kube-api-access-4pjl5\") on node \"crc\" DevicePath \"\"" Nov 21 19:34:41 crc kubenswrapper[4701]: I1121 19:34:41.792553 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-mxn98" event={"ID":"53533f20-cd97-4dfe-a00c-e5f0e6f86403","Type":"ContainerDied","Data":"05d603a706eafedf8355031d3a0c4534758412c914aef396f8b91ed8a829b3ce"} Nov 21 19:34:41 crc kubenswrapper[4701]: I1121 19:34:41.792606 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="05d603a706eafedf8355031d3a0c4534758412c914aef396f8b91ed8a829b3ce" Nov 21 19:34:41 crc kubenswrapper[4701]: I1121 19:34:41.792646 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-mxn98" Nov 21 19:34:41 crc kubenswrapper[4701]: I1121 19:34:41.928333 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-p5jn2"] Nov 21 19:34:41 crc kubenswrapper[4701]: E1121 19:34:41.930636 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53533f20-cd97-4dfe-a00c-e5f0e6f86403" containerName="ssh-known-hosts-edpm-deployment" Nov 21 19:34:41 crc kubenswrapper[4701]: I1121 19:34:41.930683 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="53533f20-cd97-4dfe-a00c-e5f0e6f86403" containerName="ssh-known-hosts-edpm-deployment" Nov 21 19:34:41 crc kubenswrapper[4701]: I1121 19:34:41.931481 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="53533f20-cd97-4dfe-a00c-e5f0e6f86403" containerName="ssh-known-hosts-edpm-deployment" Nov 21 19:34:41 crc kubenswrapper[4701]: I1121 19:34:41.942454 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-p5jn2" Nov 21 19:34:41 crc kubenswrapper[4701]: I1121 19:34:41.950960 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hqsfp" Nov 21 19:34:41 crc kubenswrapper[4701]: I1121 19:34:41.951489 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 19:34:41 crc kubenswrapper[4701]: I1121 19:34:41.951695 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 19:34:41 crc kubenswrapper[4701]: I1121 19:34:41.951921 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 19:34:41 crc kubenswrapper[4701]: I1121 19:34:41.976823 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-p5jn2"] Nov 21 19:34:41 crc kubenswrapper[4701]: I1121 19:34:41.983283 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-p5jn2\" (UID: \"ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-p5jn2" Nov 21 19:34:41 crc kubenswrapper[4701]: I1121 19:34:41.983489 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gj6r6\" (UniqueName: \"kubernetes.io/projected/ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1-kube-api-access-gj6r6\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-p5jn2\" (UID: \"ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-p5jn2" Nov 21 19:34:41 crc kubenswrapper[4701]: I1121 19:34:41.983624 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-p5jn2\" (UID: \"ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-p5jn2" Nov 21 19:34:42 crc kubenswrapper[4701]: I1121 19:34:42.086239 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-p5jn2\" (UID: \"ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-p5jn2" Nov 21 19:34:42 crc kubenswrapper[4701]: I1121 19:34:42.086382 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gj6r6\" (UniqueName: \"kubernetes.io/projected/ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1-kube-api-access-gj6r6\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-p5jn2\" (UID: \"ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-p5jn2" Nov 21 19:34:42 crc kubenswrapper[4701]: I1121 19:34:42.086456 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-p5jn2\" (UID: \"ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1\") " 
pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-p5jn2" Nov 21 19:34:42 crc kubenswrapper[4701]: I1121 19:34:42.090727 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-p5jn2\" (UID: \"ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-p5jn2" Nov 21 19:34:42 crc kubenswrapper[4701]: I1121 19:34:42.102500 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-p5jn2\" (UID: \"ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-p5jn2" Nov 21 19:34:42 crc kubenswrapper[4701]: I1121 19:34:42.103045 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gj6r6\" (UniqueName: \"kubernetes.io/projected/ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1-kube-api-access-gj6r6\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-p5jn2\" (UID: \"ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-p5jn2" Nov 21 19:34:42 crc kubenswrapper[4701]: I1121 19:34:42.283144 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-p5jn2" Nov 21 19:34:43 crc kubenswrapper[4701]: I1121 19:34:43.011501 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-p5jn2"] Nov 21 19:34:43 crc kubenswrapper[4701]: W1121 19:34:43.022594 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podca5a5ac3_7a81_42f3_8a15_5fce2f096bd1.slice/crio-a0ce3a7dc24a1fcb67d18ac3d0bebf7989f1d7cfa509bb17415ef9bce9752264 WatchSource:0}: Error finding container a0ce3a7dc24a1fcb67d18ac3d0bebf7989f1d7cfa509bb17415ef9bce9752264: Status 404 returned error can't find the container with id a0ce3a7dc24a1fcb67d18ac3d0bebf7989f1d7cfa509bb17415ef9bce9752264 Nov 21 19:34:43 crc kubenswrapper[4701]: I1121 19:34:43.817443 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-p5jn2" event={"ID":"ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1","Type":"ContainerStarted","Data":"6c8bf0cc3b4a3f3a74d99d583ee40e7a7f8fe895e383e4298c77971b99a8cf20"} Nov 21 19:34:43 crc kubenswrapper[4701]: I1121 19:34:43.817962 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-p5jn2" event={"ID":"ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1","Type":"ContainerStarted","Data":"a0ce3a7dc24a1fcb67d18ac3d0bebf7989f1d7cfa509bb17415ef9bce9752264"} Nov 21 19:34:43 crc kubenswrapper[4701]: I1121 19:34:43.851918 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-p5jn2" podStartSLOduration=2.439667601 podStartE2EDuration="2.851848808s" podCreationTimestamp="2025-11-21 19:34:41 +0000 UTC" firstStartedPulling="2025-11-21 19:34:43.02733236 +0000 UTC m=+1973.812472427" lastFinishedPulling="2025-11-21 19:34:43.439513587 +0000 UTC m=+1974.224653634" observedRunningTime="2025-11-21 19:34:43.840024224 +0000 UTC m=+1974.625164261" watchObservedRunningTime="2025-11-21 19:34:43.851848808 +0000 UTC 
m=+1974.636988845" Nov 21 19:34:53 crc kubenswrapper[4701]: I1121 19:34:53.969402 4701 generic.go:334] "Generic (PLEG): container finished" podID="ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1" containerID="6c8bf0cc3b4a3f3a74d99d583ee40e7a7f8fe895e383e4298c77971b99a8cf20" exitCode=0 Nov 21 19:34:53 crc kubenswrapper[4701]: I1121 19:34:53.973031 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-p5jn2" event={"ID":"ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1","Type":"ContainerDied","Data":"6c8bf0cc3b4a3f3a74d99d583ee40e7a7f8fe895e383e4298c77971b99a8cf20"} Nov 21 19:34:55 crc kubenswrapper[4701]: I1121 19:34:55.477357 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-p5jn2" Nov 21 19:34:55 crc kubenswrapper[4701]: I1121 19:34:55.594826 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1-ssh-key\") pod \"ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1\" (UID: \"ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1\") " Nov 21 19:34:55 crc kubenswrapper[4701]: I1121 19:34:55.594942 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gj6r6\" (UniqueName: \"kubernetes.io/projected/ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1-kube-api-access-gj6r6\") pod \"ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1\" (UID: \"ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1\") " Nov 21 19:34:55 crc kubenswrapper[4701]: I1121 19:34:55.594988 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1-inventory\") pod \"ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1\" (UID: \"ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1\") " Nov 21 19:34:55 crc kubenswrapper[4701]: I1121 19:34:55.604802 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1-kube-api-access-gj6r6" (OuterVolumeSpecName: "kube-api-access-gj6r6") pod "ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1" (UID: "ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1"). InnerVolumeSpecName "kube-api-access-gj6r6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:34:55 crc kubenswrapper[4701]: I1121 19:34:55.626013 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1" (UID: "ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:34:55 crc kubenswrapper[4701]: I1121 19:34:55.636288 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1-inventory" (OuterVolumeSpecName: "inventory") pod "ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1" (UID: "ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:34:55 crc kubenswrapper[4701]: I1121 19:34:55.697658 4701 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 19:34:55 crc kubenswrapper[4701]: I1121 19:34:55.697724 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gj6r6\" (UniqueName: \"kubernetes.io/projected/ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1-kube-api-access-gj6r6\") on node \"crc\" DevicePath \"\"" Nov 21 19:34:55 crc kubenswrapper[4701]: I1121 19:34:55.697739 4701 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 19:34:55 crc kubenswrapper[4701]: I1121 19:34:55.998348 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-p5jn2" event={"ID":"ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1","Type":"ContainerDied","Data":"a0ce3a7dc24a1fcb67d18ac3d0bebf7989f1d7cfa509bb17415ef9bce9752264"} Nov 21 19:34:55 crc kubenswrapper[4701]: I1121 19:34:55.998410 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a0ce3a7dc24a1fcb67d18ac3d0bebf7989f1d7cfa509bb17415ef9bce9752264" Nov 21 19:34:55 crc kubenswrapper[4701]: I1121 19:34:55.998475 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-p5jn2" Nov 21 19:34:56 crc kubenswrapper[4701]: I1121 19:34:56.112596 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-kj7pb"] Nov 21 19:34:56 crc kubenswrapper[4701]: E1121 19:34:56.113354 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 21 19:34:56 crc kubenswrapper[4701]: I1121 19:34:56.113411 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 21 19:34:56 crc kubenswrapper[4701]: I1121 19:34:56.113752 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 21 19:34:56 crc kubenswrapper[4701]: I1121 19:34:56.114985 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-kj7pb" Nov 21 19:34:56 crc kubenswrapper[4701]: I1121 19:34:56.118303 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hqsfp" Nov 21 19:34:56 crc kubenswrapper[4701]: I1121 19:34:56.119128 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 19:34:56 crc kubenswrapper[4701]: I1121 19:34:56.119425 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 19:34:56 crc kubenswrapper[4701]: I1121 19:34:56.126805 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-kj7pb"] Nov 21 19:34:56 crc kubenswrapper[4701]: I1121 19:34:56.127935 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 19:34:56 crc kubenswrapper[4701]: I1121 19:34:56.313920 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pkmmd\" (UniqueName: \"kubernetes.io/projected/bcdfb82f-4b6c-44ca-b282-1f803082c73d-kube-api-access-pkmmd\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-kj7pb\" (UID: \"bcdfb82f-4b6c-44ca-b282-1f803082c73d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-kj7pb" Nov 21 19:34:56 crc kubenswrapper[4701]: I1121 19:34:56.314635 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bcdfb82f-4b6c-44ca-b282-1f803082c73d-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-kj7pb\" (UID: \"bcdfb82f-4b6c-44ca-b282-1f803082c73d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-kj7pb" Nov 21 19:34:56 crc kubenswrapper[4701]: I1121 19:34:56.314696 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bcdfb82f-4b6c-44ca-b282-1f803082c73d-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-kj7pb\" (UID: \"bcdfb82f-4b6c-44ca-b282-1f803082c73d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-kj7pb" Nov 21 19:34:56 crc kubenswrapper[4701]: I1121 19:34:56.417340 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bcdfb82f-4b6c-44ca-b282-1f803082c73d-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-kj7pb\" (UID: \"bcdfb82f-4b6c-44ca-b282-1f803082c73d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-kj7pb" Nov 21 19:34:56 crc kubenswrapper[4701]: I1121 19:34:56.417430 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bcdfb82f-4b6c-44ca-b282-1f803082c73d-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-kj7pb\" (UID: \"bcdfb82f-4b6c-44ca-b282-1f803082c73d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-kj7pb" Nov 21 19:34:56 crc kubenswrapper[4701]: I1121 19:34:56.417493 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pkmmd\" (UniqueName: \"kubernetes.io/projected/bcdfb82f-4b6c-44ca-b282-1f803082c73d-kube-api-access-pkmmd\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-kj7pb\" (UID: 
\"bcdfb82f-4b6c-44ca-b282-1f803082c73d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-kj7pb" Nov 21 19:34:56 crc kubenswrapper[4701]: I1121 19:34:56.427856 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bcdfb82f-4b6c-44ca-b282-1f803082c73d-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-kj7pb\" (UID: \"bcdfb82f-4b6c-44ca-b282-1f803082c73d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-kj7pb" Nov 21 19:34:56 crc kubenswrapper[4701]: I1121 19:34:56.430439 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bcdfb82f-4b6c-44ca-b282-1f803082c73d-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-kj7pb\" (UID: \"bcdfb82f-4b6c-44ca-b282-1f803082c73d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-kj7pb" Nov 21 19:34:56 crc kubenswrapper[4701]: I1121 19:34:56.452733 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pkmmd\" (UniqueName: \"kubernetes.io/projected/bcdfb82f-4b6c-44ca-b282-1f803082c73d-kube-api-access-pkmmd\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-kj7pb\" (UID: \"bcdfb82f-4b6c-44ca-b282-1f803082c73d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-kj7pb" Nov 21 19:34:56 crc kubenswrapper[4701]: I1121 19:34:56.743951 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-kj7pb" Nov 21 19:34:57 crc kubenswrapper[4701]: I1121 19:34:57.430517 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-kj7pb"] Nov 21 19:34:58 crc kubenswrapper[4701]: I1121 19:34:58.019590 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-kj7pb" event={"ID":"bcdfb82f-4b6c-44ca-b282-1f803082c73d","Type":"ContainerStarted","Data":"816f445b55ae2c5a72848c833eb5c7a9dee1f40e918a395f313ccb988d2f0538"} Nov 21 19:34:59 crc kubenswrapper[4701]: I1121 19:34:59.034814 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-kj7pb" event={"ID":"bcdfb82f-4b6c-44ca-b282-1f803082c73d","Type":"ContainerStarted","Data":"9740a8a8e9dc308408f194efef7cd374738b8ca3ea150e81cb2caa7c431b523b"} Nov 21 19:35:09 crc kubenswrapper[4701]: I1121 19:35:09.162874 4701 generic.go:334] "Generic (PLEG): container finished" podID="bcdfb82f-4b6c-44ca-b282-1f803082c73d" containerID="9740a8a8e9dc308408f194efef7cd374738b8ca3ea150e81cb2caa7c431b523b" exitCode=0 Nov 21 19:35:09 crc kubenswrapper[4701]: I1121 19:35:09.163714 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-kj7pb" event={"ID":"bcdfb82f-4b6c-44ca-b282-1f803082c73d","Type":"ContainerDied","Data":"9740a8a8e9dc308408f194efef7cd374738b8ca3ea150e81cb2caa7c431b523b"} Nov 21 19:35:10 crc kubenswrapper[4701]: I1121 19:35:10.791532 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-kj7pb" Nov 21 19:35:10 crc kubenswrapper[4701]: I1121 19:35:10.920050 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pkmmd\" (UniqueName: \"kubernetes.io/projected/bcdfb82f-4b6c-44ca-b282-1f803082c73d-kube-api-access-pkmmd\") pod \"bcdfb82f-4b6c-44ca-b282-1f803082c73d\" (UID: \"bcdfb82f-4b6c-44ca-b282-1f803082c73d\") " Nov 21 19:35:10 crc kubenswrapper[4701]: I1121 19:35:10.920666 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bcdfb82f-4b6c-44ca-b282-1f803082c73d-ssh-key\") pod \"bcdfb82f-4b6c-44ca-b282-1f803082c73d\" (UID: \"bcdfb82f-4b6c-44ca-b282-1f803082c73d\") " Nov 21 19:35:10 crc kubenswrapper[4701]: I1121 19:35:10.920715 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bcdfb82f-4b6c-44ca-b282-1f803082c73d-inventory\") pod \"bcdfb82f-4b6c-44ca-b282-1f803082c73d\" (UID: \"bcdfb82f-4b6c-44ca-b282-1f803082c73d\") " Nov 21 19:35:10 crc kubenswrapper[4701]: I1121 19:35:10.931777 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bcdfb82f-4b6c-44ca-b282-1f803082c73d-kube-api-access-pkmmd" (OuterVolumeSpecName: "kube-api-access-pkmmd") pod "bcdfb82f-4b6c-44ca-b282-1f803082c73d" (UID: "bcdfb82f-4b6c-44ca-b282-1f803082c73d"). InnerVolumeSpecName "kube-api-access-pkmmd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:35:10 crc kubenswrapper[4701]: I1121 19:35:10.967630 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bcdfb82f-4b6c-44ca-b282-1f803082c73d-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "bcdfb82f-4b6c-44ca-b282-1f803082c73d" (UID: "bcdfb82f-4b6c-44ca-b282-1f803082c73d"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:35:10 crc kubenswrapper[4701]: I1121 19:35:10.968111 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bcdfb82f-4b6c-44ca-b282-1f803082c73d-inventory" (OuterVolumeSpecName: "inventory") pod "bcdfb82f-4b6c-44ca-b282-1f803082c73d" (UID: "bcdfb82f-4b6c-44ca-b282-1f803082c73d"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.026361 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pkmmd\" (UniqueName: \"kubernetes.io/projected/bcdfb82f-4b6c-44ca-b282-1f803082c73d-kube-api-access-pkmmd\") on node \"crc\" DevicePath \"\"" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.026414 4701 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bcdfb82f-4b6c-44ca-b282-1f803082c73d-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.026425 4701 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bcdfb82f-4b6c-44ca-b282-1f803082c73d-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.191565 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-kj7pb" event={"ID":"bcdfb82f-4b6c-44ca-b282-1f803082c73d","Type":"ContainerDied","Data":"816f445b55ae2c5a72848c833eb5c7a9dee1f40e918a395f313ccb988d2f0538"} Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.191660 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="816f445b55ae2c5a72848c833eb5c7a9dee1f40e918a395f313ccb988d2f0538" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.191797 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-kj7pb" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.317057 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b"] Nov 21 19:35:11 crc kubenswrapper[4701]: E1121 19:35:11.318157 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bcdfb82f-4b6c-44ca-b282-1f803082c73d" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.318301 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="bcdfb82f-4b6c-44ca-b282-1f803082c73d" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.318665 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="bcdfb82f-4b6c-44ca-b282-1f803082c73d" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.319691 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.331594 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.338120 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.338284 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.338415 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.338707 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.339069 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.339291 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.339380 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hqsfp" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.346622 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b"] Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.437526 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.437744 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.437802 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.437839 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b4e65662-463f-4f48-b668-1ad55aaeb9fe-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.437991 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.438033 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b4e65662-463f-4f48-b668-1ad55aaeb9fe-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.438060 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b4e65662-463f-4f48-b668-1ad55aaeb9fe-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.438141 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.438262 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.438296 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.438390 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b4e65662-463f-4f48-b668-1ad55aaeb9fe-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: 
\"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.438422 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7z2zm\" (UniqueName: \"kubernetes.io/projected/b4e65662-463f-4f48-b668-1ad55aaeb9fe-kube-api-access-7z2zm\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.438469 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.438576 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.541633 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.541798 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.541866 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b4e65662-463f-4f48-b668-1ad55aaeb9fe-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.541910 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.542001 4701 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.542055 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b4e65662-463f-4f48-b668-1ad55aaeb9fe-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.542098 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b4e65662-463f-4f48-b668-1ad55aaeb9fe-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.542144 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.542246 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.542293 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.542530 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b4e65662-463f-4f48-b668-1ad55aaeb9fe-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.542569 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7z2zm\" (UniqueName: \"kubernetes.io/projected/b4e65662-463f-4f48-b668-1ad55aaeb9fe-kube-api-access-7z2zm\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.542638 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.542690 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.548252 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b4e65662-463f-4f48-b668-1ad55aaeb9fe-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.549490 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.549554 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b4e65662-463f-4f48-b668-1ad55aaeb9fe-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.549669 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.551068 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.551586 
4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.551598 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b4e65662-463f-4f48-b668-1ad55aaeb9fe-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.553078 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.553078 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.553471 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.555063 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.559258 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.559828 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b4e65662-463f-4f48-b668-1ad55aaeb9fe-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " 
pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.574280 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7z2zm\" (UniqueName: \"kubernetes.io/projected/b4e65662-463f-4f48-b668-1ad55aaeb9fe-kube-api-access-7z2zm\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:11 crc kubenswrapper[4701]: I1121 19:35:11.643696 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:35:12 crc kubenswrapper[4701]: I1121 19:35:12.311629 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b"] Nov 21 19:35:13 crc kubenswrapper[4701]: I1121 19:35:13.229053 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" event={"ID":"b4e65662-463f-4f48-b668-1ad55aaeb9fe","Type":"ContainerStarted","Data":"ae2b0dc7df68e4184d73d6c2618017932d7498d1ff5b3162cf501f083d15bc9a"} Nov 21 19:35:13 crc kubenswrapper[4701]: I1121 19:35:13.229113 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" event={"ID":"b4e65662-463f-4f48-b668-1ad55aaeb9fe","Type":"ContainerStarted","Data":"d21023da1a4783abf0daf105672766f9a82415dee5a5054282527dc3fb627bf3"} Nov 21 19:35:13 crc kubenswrapper[4701]: I1121 19:35:13.259916 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" podStartSLOduration=1.869105654 podStartE2EDuration="2.259896894s" podCreationTimestamp="2025-11-21 19:35:11 +0000 UTC" firstStartedPulling="2025-11-21 19:35:12.318218202 +0000 UTC m=+2003.103358229" lastFinishedPulling="2025-11-21 19:35:12.709009402 +0000 UTC m=+2003.494149469" observedRunningTime="2025-11-21 19:35:13.256772798 +0000 UTC m=+2004.041912825" watchObservedRunningTime="2025-11-21 19:35:13.259896894 +0000 UTC m=+2004.045036921" Nov 21 19:35:18 crc kubenswrapper[4701]: I1121 19:35:18.613289 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 19:35:18 crc kubenswrapper[4701]: I1121 19:35:18.613956 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 19:35:39 crc kubenswrapper[4701]: I1121 19:35:39.081646 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-djhrq"] Nov 21 19:35:39 crc kubenswrapper[4701]: I1121 19:35:39.087918 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-djhrq" Nov 21 19:35:39 crc kubenswrapper[4701]: I1121 19:35:39.098852 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-djhrq"] Nov 21 19:35:39 crc kubenswrapper[4701]: I1121 19:35:39.121022 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a1a488b-1355-4eaa-a385-828e1315f431-utilities\") pod \"redhat-operators-djhrq\" (UID: \"9a1a488b-1355-4eaa-a385-828e1315f431\") " pod="openshift-marketplace/redhat-operators-djhrq" Nov 21 19:35:39 crc kubenswrapper[4701]: I1121 19:35:39.121303 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q2kbc\" (UniqueName: \"kubernetes.io/projected/9a1a488b-1355-4eaa-a385-828e1315f431-kube-api-access-q2kbc\") pod \"redhat-operators-djhrq\" (UID: \"9a1a488b-1355-4eaa-a385-828e1315f431\") " pod="openshift-marketplace/redhat-operators-djhrq" Nov 21 19:35:39 crc kubenswrapper[4701]: I1121 19:35:39.121491 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a1a488b-1355-4eaa-a385-828e1315f431-catalog-content\") pod \"redhat-operators-djhrq\" (UID: \"9a1a488b-1355-4eaa-a385-828e1315f431\") " pod="openshift-marketplace/redhat-operators-djhrq" Nov 21 19:35:39 crc kubenswrapper[4701]: I1121 19:35:39.224062 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q2kbc\" (UniqueName: \"kubernetes.io/projected/9a1a488b-1355-4eaa-a385-828e1315f431-kube-api-access-q2kbc\") pod \"redhat-operators-djhrq\" (UID: \"9a1a488b-1355-4eaa-a385-828e1315f431\") " pod="openshift-marketplace/redhat-operators-djhrq" Nov 21 19:35:39 crc kubenswrapper[4701]: I1121 19:35:39.224181 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a1a488b-1355-4eaa-a385-828e1315f431-catalog-content\") pod \"redhat-operators-djhrq\" (UID: \"9a1a488b-1355-4eaa-a385-828e1315f431\") " pod="openshift-marketplace/redhat-operators-djhrq" Nov 21 19:35:39 crc kubenswrapper[4701]: I1121 19:35:39.224269 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a1a488b-1355-4eaa-a385-828e1315f431-utilities\") pod \"redhat-operators-djhrq\" (UID: \"9a1a488b-1355-4eaa-a385-828e1315f431\") " pod="openshift-marketplace/redhat-operators-djhrq" Nov 21 19:35:39 crc kubenswrapper[4701]: I1121 19:35:39.225491 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a1a488b-1355-4eaa-a385-828e1315f431-utilities\") pod \"redhat-operators-djhrq\" (UID: \"9a1a488b-1355-4eaa-a385-828e1315f431\") " pod="openshift-marketplace/redhat-operators-djhrq" Nov 21 19:35:39 crc kubenswrapper[4701]: I1121 19:35:39.225533 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a1a488b-1355-4eaa-a385-828e1315f431-catalog-content\") pod \"redhat-operators-djhrq\" (UID: \"9a1a488b-1355-4eaa-a385-828e1315f431\") " pod="openshift-marketplace/redhat-operators-djhrq" Nov 21 19:35:39 crc kubenswrapper[4701]: I1121 19:35:39.269861 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-q2kbc\" (UniqueName: \"kubernetes.io/projected/9a1a488b-1355-4eaa-a385-828e1315f431-kube-api-access-q2kbc\") pod \"redhat-operators-djhrq\" (UID: \"9a1a488b-1355-4eaa-a385-828e1315f431\") " pod="openshift-marketplace/redhat-operators-djhrq" Nov 21 19:35:39 crc kubenswrapper[4701]: I1121 19:35:39.442696 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-djhrq" Nov 21 19:35:39 crc kubenswrapper[4701]: I1121 19:35:39.933819 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-djhrq"] Nov 21 19:35:40 crc kubenswrapper[4701]: I1121 19:35:40.603228 4701 generic.go:334] "Generic (PLEG): container finished" podID="9a1a488b-1355-4eaa-a385-828e1315f431" containerID="90c236971c336d0bdcb51cb5f2308a3569d719397bb84e65a988cb009bb16697" exitCode=0 Nov 21 19:35:40 crc kubenswrapper[4701]: I1121 19:35:40.603319 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-djhrq" event={"ID":"9a1a488b-1355-4eaa-a385-828e1315f431","Type":"ContainerDied","Data":"90c236971c336d0bdcb51cb5f2308a3569d719397bb84e65a988cb009bb16697"} Nov 21 19:35:40 crc kubenswrapper[4701]: I1121 19:35:40.603709 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-djhrq" event={"ID":"9a1a488b-1355-4eaa-a385-828e1315f431","Type":"ContainerStarted","Data":"118cf1b24f9170884b53e8a5a642fc1df1631bff4dbe0e5d47cc36fa9e62bf76"} Nov 21 19:35:41 crc kubenswrapper[4701]: I1121 19:35:41.621533 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-djhrq" event={"ID":"9a1a488b-1355-4eaa-a385-828e1315f431","Type":"ContainerStarted","Data":"bfd63d0a443993e6b66cbfdcb4ef2f7d31e2b1a2a35fd1e4087f13a12a9f0f0d"} Nov 21 19:35:45 crc kubenswrapper[4701]: I1121 19:35:45.676377 4701 generic.go:334] "Generic (PLEG): container finished" podID="9a1a488b-1355-4eaa-a385-828e1315f431" containerID="bfd63d0a443993e6b66cbfdcb4ef2f7d31e2b1a2a35fd1e4087f13a12a9f0f0d" exitCode=0 Nov 21 19:35:45 crc kubenswrapper[4701]: I1121 19:35:45.676452 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-djhrq" event={"ID":"9a1a488b-1355-4eaa-a385-828e1315f431","Type":"ContainerDied","Data":"bfd63d0a443993e6b66cbfdcb4ef2f7d31e2b1a2a35fd1e4087f13a12a9f0f0d"} Nov 21 19:35:46 crc kubenswrapper[4701]: I1121 19:35:46.695095 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-djhrq" event={"ID":"9a1a488b-1355-4eaa-a385-828e1315f431","Type":"ContainerStarted","Data":"6ff3531c73d3844e1308a04378ae5cf02991e524eee72755c732e84f1c41ee94"} Nov 21 19:35:46 crc kubenswrapper[4701]: I1121 19:35:46.729469 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-djhrq" podStartSLOduration=2.005744902 podStartE2EDuration="7.729443172s" podCreationTimestamp="2025-11-21 19:35:39 +0000 UTC" firstStartedPulling="2025-11-21 19:35:40.60554703 +0000 UTC m=+2031.390687057" lastFinishedPulling="2025-11-21 19:35:46.32924529 +0000 UTC m=+2037.114385327" observedRunningTime="2025-11-21 19:35:46.719655855 +0000 UTC m=+2037.504795892" watchObservedRunningTime="2025-11-21 19:35:46.729443172 +0000 UTC m=+2037.514583199" Nov 21 19:35:48 crc kubenswrapper[4701]: I1121 19:35:48.614240 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 19:35:48 crc kubenswrapper[4701]: I1121 19:35:48.614769 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 19:35:49 crc kubenswrapper[4701]: I1121 19:35:49.443729 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-djhrq" Nov 21 19:35:49 crc kubenswrapper[4701]: I1121 19:35:49.443866 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-djhrq" Nov 21 19:35:50 crc kubenswrapper[4701]: I1121 19:35:50.519088 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-djhrq" podUID="9a1a488b-1355-4eaa-a385-828e1315f431" containerName="registry-server" probeResult="failure" output=< Nov 21 19:35:50 crc kubenswrapper[4701]: timeout: failed to connect service ":50051" within 1s Nov 21 19:35:50 crc kubenswrapper[4701]: > Nov 21 19:35:59 crc kubenswrapper[4701]: I1121 19:35:59.540714 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-djhrq" Nov 21 19:35:59 crc kubenswrapper[4701]: I1121 19:35:59.617281 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-djhrq" Nov 21 19:35:59 crc kubenswrapper[4701]: I1121 19:35:59.792051 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-djhrq"] Nov 21 19:36:00 crc kubenswrapper[4701]: I1121 19:36:00.895829 4701 generic.go:334] "Generic (PLEG): container finished" podID="b4e65662-463f-4f48-b668-1ad55aaeb9fe" containerID="ae2b0dc7df68e4184d73d6c2618017932d7498d1ff5b3162cf501f083d15bc9a" exitCode=0 Nov 21 19:36:00 crc kubenswrapper[4701]: I1121 19:36:00.895925 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" event={"ID":"b4e65662-463f-4f48-b668-1ad55aaeb9fe","Type":"ContainerDied","Data":"ae2b0dc7df68e4184d73d6c2618017932d7498d1ff5b3162cf501f083d15bc9a"} Nov 21 19:36:00 crc kubenswrapper[4701]: I1121 19:36:00.897543 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-djhrq" podUID="9a1a488b-1355-4eaa-a385-828e1315f431" containerName="registry-server" containerID="cri-o://6ff3531c73d3844e1308a04378ae5cf02991e524eee72755c732e84f1c41ee94" gracePeriod=2 Nov 21 19:36:01 crc kubenswrapper[4701]: I1121 19:36:01.486925 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-djhrq" Nov 21 19:36:01 crc kubenswrapper[4701]: I1121 19:36:01.543584 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q2kbc\" (UniqueName: \"kubernetes.io/projected/9a1a488b-1355-4eaa-a385-828e1315f431-kube-api-access-q2kbc\") pod \"9a1a488b-1355-4eaa-a385-828e1315f431\" (UID: \"9a1a488b-1355-4eaa-a385-828e1315f431\") " Nov 21 19:36:01 crc kubenswrapper[4701]: I1121 19:36:01.543690 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a1a488b-1355-4eaa-a385-828e1315f431-catalog-content\") pod \"9a1a488b-1355-4eaa-a385-828e1315f431\" (UID: \"9a1a488b-1355-4eaa-a385-828e1315f431\") " Nov 21 19:36:01 crc kubenswrapper[4701]: I1121 19:36:01.543817 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a1a488b-1355-4eaa-a385-828e1315f431-utilities\") pod \"9a1a488b-1355-4eaa-a385-828e1315f431\" (UID: \"9a1a488b-1355-4eaa-a385-828e1315f431\") " Nov 21 19:36:01 crc kubenswrapper[4701]: I1121 19:36:01.546568 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a1a488b-1355-4eaa-a385-828e1315f431-utilities" (OuterVolumeSpecName: "utilities") pod "9a1a488b-1355-4eaa-a385-828e1315f431" (UID: "9a1a488b-1355-4eaa-a385-828e1315f431"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:36:01 crc kubenswrapper[4701]: I1121 19:36:01.552888 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a1a488b-1355-4eaa-a385-828e1315f431-kube-api-access-q2kbc" (OuterVolumeSpecName: "kube-api-access-q2kbc") pod "9a1a488b-1355-4eaa-a385-828e1315f431" (UID: "9a1a488b-1355-4eaa-a385-828e1315f431"). InnerVolumeSpecName "kube-api-access-q2kbc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:36:01 crc kubenswrapper[4701]: I1121 19:36:01.645756 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a1a488b-1355-4eaa-a385-828e1315f431-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9a1a488b-1355-4eaa-a385-828e1315f431" (UID: "9a1a488b-1355-4eaa-a385-828e1315f431"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:36:01 crc kubenswrapper[4701]: I1121 19:36:01.646665 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q2kbc\" (UniqueName: \"kubernetes.io/projected/9a1a488b-1355-4eaa-a385-828e1315f431-kube-api-access-q2kbc\") on node \"crc\" DevicePath \"\"" Nov 21 19:36:01 crc kubenswrapper[4701]: I1121 19:36:01.646704 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a1a488b-1355-4eaa-a385-828e1315f431-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 19:36:01 crc kubenswrapper[4701]: I1121 19:36:01.646715 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a1a488b-1355-4eaa-a385-828e1315f431-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 19:36:01 crc kubenswrapper[4701]: I1121 19:36:01.909691 4701 generic.go:334] "Generic (PLEG): container finished" podID="9a1a488b-1355-4eaa-a385-828e1315f431" containerID="6ff3531c73d3844e1308a04378ae5cf02991e524eee72755c732e84f1c41ee94" exitCode=0 Nov 21 19:36:01 crc kubenswrapper[4701]: I1121 19:36:01.909738 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-djhrq" event={"ID":"9a1a488b-1355-4eaa-a385-828e1315f431","Type":"ContainerDied","Data":"6ff3531c73d3844e1308a04378ae5cf02991e524eee72755c732e84f1c41ee94"} Nov 21 19:36:01 crc kubenswrapper[4701]: I1121 19:36:01.909784 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-djhrq" event={"ID":"9a1a488b-1355-4eaa-a385-828e1315f431","Type":"ContainerDied","Data":"118cf1b24f9170884b53e8a5a642fc1df1631bff4dbe0e5d47cc36fa9e62bf76"} Nov 21 19:36:01 crc kubenswrapper[4701]: I1121 19:36:01.909716 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-djhrq" Nov 21 19:36:01 crc kubenswrapper[4701]: I1121 19:36:01.909803 4701 scope.go:117] "RemoveContainer" containerID="6ff3531c73d3844e1308a04378ae5cf02991e524eee72755c732e84f1c41ee94" Nov 21 19:36:01 crc kubenswrapper[4701]: I1121 19:36:01.978971 4701 scope.go:117] "RemoveContainer" containerID="bfd63d0a443993e6b66cbfdcb4ef2f7d31e2b1a2a35fd1e4087f13a12a9f0f0d" Nov 21 19:36:01 crc kubenswrapper[4701]: I1121 19:36:01.989397 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-djhrq"] Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.004230 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-djhrq"] Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.015503 4701 scope.go:117] "RemoveContainer" containerID="90c236971c336d0bdcb51cb5f2308a3569d719397bb84e65a988cb009bb16697" Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.085151 4701 scope.go:117] "RemoveContainer" containerID="6ff3531c73d3844e1308a04378ae5cf02991e524eee72755c732e84f1c41ee94" Nov 21 19:36:02 crc kubenswrapper[4701]: E1121 19:36:02.085687 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6ff3531c73d3844e1308a04378ae5cf02991e524eee72755c732e84f1c41ee94\": container with ID starting with 6ff3531c73d3844e1308a04378ae5cf02991e524eee72755c732e84f1c41ee94 not found: ID does not exist" containerID="6ff3531c73d3844e1308a04378ae5cf02991e524eee72755c732e84f1c41ee94" Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.085723 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ff3531c73d3844e1308a04378ae5cf02991e524eee72755c732e84f1c41ee94"} err="failed to get container status \"6ff3531c73d3844e1308a04378ae5cf02991e524eee72755c732e84f1c41ee94\": rpc error: code = NotFound desc = could not find container \"6ff3531c73d3844e1308a04378ae5cf02991e524eee72755c732e84f1c41ee94\": container with ID starting with 6ff3531c73d3844e1308a04378ae5cf02991e524eee72755c732e84f1c41ee94 not found: ID does not exist" Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.085748 4701 scope.go:117] "RemoveContainer" containerID="bfd63d0a443993e6b66cbfdcb4ef2f7d31e2b1a2a35fd1e4087f13a12a9f0f0d" Nov 21 19:36:02 crc kubenswrapper[4701]: E1121 19:36:02.086107 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bfd63d0a443993e6b66cbfdcb4ef2f7d31e2b1a2a35fd1e4087f13a12a9f0f0d\": container with ID starting with bfd63d0a443993e6b66cbfdcb4ef2f7d31e2b1a2a35fd1e4087f13a12a9f0f0d not found: ID does not exist" containerID="bfd63d0a443993e6b66cbfdcb4ef2f7d31e2b1a2a35fd1e4087f13a12a9f0f0d" Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.086131 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bfd63d0a443993e6b66cbfdcb4ef2f7d31e2b1a2a35fd1e4087f13a12a9f0f0d"} err="failed to get container status \"bfd63d0a443993e6b66cbfdcb4ef2f7d31e2b1a2a35fd1e4087f13a12a9f0f0d\": rpc error: code = NotFound desc = could not find container \"bfd63d0a443993e6b66cbfdcb4ef2f7d31e2b1a2a35fd1e4087f13a12a9f0f0d\": container with ID starting with bfd63d0a443993e6b66cbfdcb4ef2f7d31e2b1a2a35fd1e4087f13a12a9f0f0d not found: ID does not exist" Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.086144 4701 scope.go:117] "RemoveContainer" 
containerID="90c236971c336d0bdcb51cb5f2308a3569d719397bb84e65a988cb009bb16697" Nov 21 19:36:02 crc kubenswrapper[4701]: E1121 19:36:02.086484 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"90c236971c336d0bdcb51cb5f2308a3569d719397bb84e65a988cb009bb16697\": container with ID starting with 90c236971c336d0bdcb51cb5f2308a3569d719397bb84e65a988cb009bb16697 not found: ID does not exist" containerID="90c236971c336d0bdcb51cb5f2308a3569d719397bb84e65a988cb009bb16697" Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.086503 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"90c236971c336d0bdcb51cb5f2308a3569d719397bb84e65a988cb009bb16697"} err="failed to get container status \"90c236971c336d0bdcb51cb5f2308a3569d719397bb84e65a988cb009bb16697\": rpc error: code = NotFound desc = could not find container \"90c236971c336d0bdcb51cb5f2308a3569d719397bb84e65a988cb009bb16697\": container with ID starting with 90c236971c336d0bdcb51cb5f2308a3569d719397bb84e65a988cb009bb16697 not found: ID does not exist" Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.513250 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.585281 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-libvirt-combined-ca-bundle\") pod \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.585397 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-ovn-combined-ca-bundle\") pod \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.585420 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-nova-combined-ca-bundle\") pod \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.585462 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b4e65662-463f-4f48-b668-1ad55aaeb9fe-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.585494 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b4e65662-463f-4f48-b668-1ad55aaeb9fe-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.585547 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-neutron-metadata-combined-ca-bundle\") pod \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.585614 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7z2zm\" (UniqueName: \"kubernetes.io/projected/b4e65662-463f-4f48-b668-1ad55aaeb9fe-kube-api-access-7z2zm\") pod \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.585653 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b4e65662-463f-4f48-b668-1ad55aaeb9fe-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.585688 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-ssh-key\") pod \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.585726 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-telemetry-combined-ca-bundle\") pod \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.585755 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-inventory\") pod \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.585788 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b4e65662-463f-4f48-b668-1ad55aaeb9fe-openstack-edpm-ipam-ovn-default-certs-0\") pod \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.585808 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-bootstrap-combined-ca-bundle\") pod \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.585823 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-repo-setup-combined-ca-bundle\") pod \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\" (UID: \"b4e65662-463f-4f48-b668-1ad55aaeb9fe\") " Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.594521 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "b4e65662-463f-4f48-b668-1ad55aaeb9fe" (UID: 
"b4e65662-463f-4f48-b668-1ad55aaeb9fe"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.594565 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4e65662-463f-4f48-b668-1ad55aaeb9fe-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "b4e65662-463f-4f48-b668-1ad55aaeb9fe" (UID: "b4e65662-463f-4f48-b668-1ad55aaeb9fe"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.595087 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "b4e65662-463f-4f48-b668-1ad55aaeb9fe" (UID: "b4e65662-463f-4f48-b668-1ad55aaeb9fe"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.596416 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4e65662-463f-4f48-b668-1ad55aaeb9fe-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "b4e65662-463f-4f48-b668-1ad55aaeb9fe" (UID: "b4e65662-463f-4f48-b668-1ad55aaeb9fe"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.596531 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "b4e65662-463f-4f48-b668-1ad55aaeb9fe" (UID: "b4e65662-463f-4f48-b668-1ad55aaeb9fe"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.597036 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "b4e65662-463f-4f48-b668-1ad55aaeb9fe" (UID: "b4e65662-463f-4f48-b668-1ad55aaeb9fe"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.597353 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4e65662-463f-4f48-b668-1ad55aaeb9fe-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "b4e65662-463f-4f48-b668-1ad55aaeb9fe" (UID: "b4e65662-463f-4f48-b668-1ad55aaeb9fe"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.601315 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4e65662-463f-4f48-b668-1ad55aaeb9fe-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "b4e65662-463f-4f48-b668-1ad55aaeb9fe" (UID: "b4e65662-463f-4f48-b668-1ad55aaeb9fe"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.601368 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "b4e65662-463f-4f48-b668-1ad55aaeb9fe" (UID: "b4e65662-463f-4f48-b668-1ad55aaeb9fe"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.601815 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4e65662-463f-4f48-b668-1ad55aaeb9fe-kube-api-access-7z2zm" (OuterVolumeSpecName: "kube-api-access-7z2zm") pod "b4e65662-463f-4f48-b668-1ad55aaeb9fe" (UID: "b4e65662-463f-4f48-b668-1ad55aaeb9fe"). InnerVolumeSpecName "kube-api-access-7z2zm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.602029 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "b4e65662-463f-4f48-b668-1ad55aaeb9fe" (UID: "b4e65662-463f-4f48-b668-1ad55aaeb9fe"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.619675 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "b4e65662-463f-4f48-b668-1ad55aaeb9fe" (UID: "b4e65662-463f-4f48-b668-1ad55aaeb9fe"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.636648 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "b4e65662-463f-4f48-b668-1ad55aaeb9fe" (UID: "b4e65662-463f-4f48-b668-1ad55aaeb9fe"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.638575 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-inventory" (OuterVolumeSpecName: "inventory") pod "b4e65662-463f-4f48-b668-1ad55aaeb9fe" (UID: "b4e65662-463f-4f48-b668-1ad55aaeb9fe"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.688654 4701 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.688700 4701 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.688712 4701 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b4e65662-463f-4f48-b668-1ad55aaeb9fe-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.688724 4701 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.688736 4701 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.688747 4701 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.688757 4701 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.688765 4701 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.688774 4701 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b4e65662-463f-4f48-b668-1ad55aaeb9fe-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.688786 4701 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b4e65662-463f-4f48-b668-1ad55aaeb9fe-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.688796 4701 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.688808 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7z2zm\" (UniqueName: 
\"kubernetes.io/projected/b4e65662-463f-4f48-b668-1ad55aaeb9fe-kube-api-access-7z2zm\") on node \"crc\" DevicePath \"\"" Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.688819 4701 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b4e65662-463f-4f48-b668-1ad55aaeb9fe-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.688829 4701 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b4e65662-463f-4f48-b668-1ad55aaeb9fe-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.928469 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" event={"ID":"b4e65662-463f-4f48-b668-1ad55aaeb9fe","Type":"ContainerDied","Data":"d21023da1a4783abf0daf105672766f9a82415dee5a5054282527dc3fb627bf3"} Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.928528 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d21023da1a4783abf0daf105672766f9a82415dee5a5054282527dc3fb627bf3" Nov 21 19:36:02 crc kubenswrapper[4701]: I1121 19:36:02.928593 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b" Nov 21 19:36:03 crc kubenswrapper[4701]: I1121 19:36:03.095862 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-rdjpc"] Nov 21 19:36:03 crc kubenswrapper[4701]: E1121 19:36:03.096624 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4e65662-463f-4f48-b668-1ad55aaeb9fe" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 21 19:36:03 crc kubenswrapper[4701]: I1121 19:36:03.096654 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4e65662-463f-4f48-b668-1ad55aaeb9fe" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 21 19:36:03 crc kubenswrapper[4701]: E1121 19:36:03.096725 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a1a488b-1355-4eaa-a385-828e1315f431" containerName="registry-server" Nov 21 19:36:03 crc kubenswrapper[4701]: I1121 19:36:03.096737 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a1a488b-1355-4eaa-a385-828e1315f431" containerName="registry-server" Nov 21 19:36:03 crc kubenswrapper[4701]: E1121 19:36:03.096756 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a1a488b-1355-4eaa-a385-828e1315f431" containerName="extract-content" Nov 21 19:36:03 crc kubenswrapper[4701]: I1121 19:36:03.096764 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a1a488b-1355-4eaa-a385-828e1315f431" containerName="extract-content" Nov 21 19:36:03 crc kubenswrapper[4701]: E1121 19:36:03.096780 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a1a488b-1355-4eaa-a385-828e1315f431" containerName="extract-utilities" Nov 21 19:36:03 crc kubenswrapper[4701]: I1121 19:36:03.096791 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a1a488b-1355-4eaa-a385-828e1315f431" containerName="extract-utilities" Nov 21 19:36:03 crc kubenswrapper[4701]: I1121 19:36:03.097100 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4e65662-463f-4f48-b668-1ad55aaeb9fe" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 21 
19:36:03 crc kubenswrapper[4701]: I1121 19:36:03.097156 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a1a488b-1355-4eaa-a385-828e1315f431" containerName="registry-server" Nov 21 19:36:03 crc kubenswrapper[4701]: I1121 19:36:03.098433 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rdjpc" Nov 21 19:36:03 crc kubenswrapper[4701]: I1121 19:36:03.105817 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 19:36:03 crc kubenswrapper[4701]: I1121 19:36:03.106706 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Nov 21 19:36:03 crc kubenswrapper[4701]: I1121 19:36:03.107130 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hqsfp" Nov 21 19:36:03 crc kubenswrapper[4701]: I1121 19:36:03.108221 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 19:36:03 crc kubenswrapper[4701]: I1121 19:36:03.108337 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 19:36:03 crc kubenswrapper[4701]: I1121 19:36:03.122065 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-rdjpc"] Nov 21 19:36:03 crc kubenswrapper[4701]: I1121 19:36:03.203540 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/640fcd44-4a2e-475b-b296-5f37ac6d55e7-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rdjpc\" (UID: \"640fcd44-4a2e-475b-b296-5f37ac6d55e7\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rdjpc" Nov 21 19:36:03 crc kubenswrapper[4701]: I1121 19:36:03.203668 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/640fcd44-4a2e-475b-b296-5f37ac6d55e7-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rdjpc\" (UID: \"640fcd44-4a2e-475b-b296-5f37ac6d55e7\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rdjpc" Nov 21 19:36:03 crc kubenswrapper[4701]: I1121 19:36:03.203711 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w28mt\" (UniqueName: \"kubernetes.io/projected/640fcd44-4a2e-475b-b296-5f37ac6d55e7-kube-api-access-w28mt\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rdjpc\" (UID: \"640fcd44-4a2e-475b-b296-5f37ac6d55e7\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rdjpc" Nov 21 19:36:03 crc kubenswrapper[4701]: I1121 19:36:03.203774 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/640fcd44-4a2e-475b-b296-5f37ac6d55e7-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rdjpc\" (UID: \"640fcd44-4a2e-475b-b296-5f37ac6d55e7\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rdjpc" Nov 21 19:36:03 crc kubenswrapper[4701]: I1121 19:36:03.203808 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/640fcd44-4a2e-475b-b296-5f37ac6d55e7-ovncontroller-config-0\") pod 
\"ovn-edpm-deployment-openstack-edpm-ipam-rdjpc\" (UID: \"640fcd44-4a2e-475b-b296-5f37ac6d55e7\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rdjpc" Nov 21 19:36:03 crc kubenswrapper[4701]: I1121 19:36:03.307944 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/640fcd44-4a2e-475b-b296-5f37ac6d55e7-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rdjpc\" (UID: \"640fcd44-4a2e-475b-b296-5f37ac6d55e7\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rdjpc" Nov 21 19:36:03 crc kubenswrapper[4701]: I1121 19:36:03.308047 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/640fcd44-4a2e-475b-b296-5f37ac6d55e7-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rdjpc\" (UID: \"640fcd44-4a2e-475b-b296-5f37ac6d55e7\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rdjpc" Nov 21 19:36:03 crc kubenswrapper[4701]: I1121 19:36:03.308081 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w28mt\" (UniqueName: \"kubernetes.io/projected/640fcd44-4a2e-475b-b296-5f37ac6d55e7-kube-api-access-w28mt\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rdjpc\" (UID: \"640fcd44-4a2e-475b-b296-5f37ac6d55e7\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rdjpc" Nov 21 19:36:03 crc kubenswrapper[4701]: I1121 19:36:03.308146 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/640fcd44-4a2e-475b-b296-5f37ac6d55e7-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rdjpc\" (UID: \"640fcd44-4a2e-475b-b296-5f37ac6d55e7\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rdjpc" Nov 21 19:36:03 crc kubenswrapper[4701]: I1121 19:36:03.308189 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/640fcd44-4a2e-475b-b296-5f37ac6d55e7-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rdjpc\" (UID: \"640fcd44-4a2e-475b-b296-5f37ac6d55e7\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rdjpc" Nov 21 19:36:03 crc kubenswrapper[4701]: I1121 19:36:03.309537 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/640fcd44-4a2e-475b-b296-5f37ac6d55e7-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rdjpc\" (UID: \"640fcd44-4a2e-475b-b296-5f37ac6d55e7\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rdjpc" Nov 21 19:36:03 crc kubenswrapper[4701]: I1121 19:36:03.313006 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/640fcd44-4a2e-475b-b296-5f37ac6d55e7-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rdjpc\" (UID: \"640fcd44-4a2e-475b-b296-5f37ac6d55e7\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rdjpc" Nov 21 19:36:03 crc kubenswrapper[4701]: I1121 19:36:03.313588 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/640fcd44-4a2e-475b-b296-5f37ac6d55e7-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rdjpc\" (UID: \"640fcd44-4a2e-475b-b296-5f37ac6d55e7\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rdjpc" Nov 
21 19:36:03 crc kubenswrapper[4701]: I1121 19:36:03.314980 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/640fcd44-4a2e-475b-b296-5f37ac6d55e7-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rdjpc\" (UID: \"640fcd44-4a2e-475b-b296-5f37ac6d55e7\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rdjpc" Nov 21 19:36:03 crc kubenswrapper[4701]: I1121 19:36:03.327231 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w28mt\" (UniqueName: \"kubernetes.io/projected/640fcd44-4a2e-475b-b296-5f37ac6d55e7-kube-api-access-w28mt\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rdjpc\" (UID: \"640fcd44-4a2e-475b-b296-5f37ac6d55e7\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rdjpc" Nov 21 19:36:03 crc kubenswrapper[4701]: I1121 19:36:03.434417 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rdjpc" Nov 21 19:36:03 crc kubenswrapper[4701]: I1121 19:36:03.971089 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a1a488b-1355-4eaa-a385-828e1315f431" path="/var/lib/kubelet/pods/9a1a488b-1355-4eaa-a385-828e1315f431/volumes" Nov 21 19:36:04 crc kubenswrapper[4701]: I1121 19:36:04.038303 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-rdjpc"] Nov 21 19:36:04 crc kubenswrapper[4701]: I1121 19:36:04.958650 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rdjpc" event={"ID":"640fcd44-4a2e-475b-b296-5f37ac6d55e7","Type":"ContainerStarted","Data":"b3aa7209b203a5da644976a3607d986688a5257c917bc8d5b5a73b63b86377e6"} Nov 21 19:36:04 crc kubenswrapper[4701]: I1121 19:36:04.959082 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rdjpc" event={"ID":"640fcd44-4a2e-475b-b296-5f37ac6d55e7","Type":"ContainerStarted","Data":"b83bdaa74b5bc3688dcbf3b251d2a472561e44dba876faa43f24937d4e1cd664"} Nov 21 19:36:04 crc kubenswrapper[4701]: I1121 19:36:04.985551 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rdjpc" podStartSLOduration=1.4855268050000001 podStartE2EDuration="1.985529007s" podCreationTimestamp="2025-11-21 19:36:03 +0000 UTC" firstStartedPulling="2025-11-21 19:36:04.042287186 +0000 UTC m=+2054.827427263" lastFinishedPulling="2025-11-21 19:36:04.542289408 +0000 UTC m=+2055.327429465" observedRunningTime="2025-11-21 19:36:04.982595897 +0000 UTC m=+2055.767735954" watchObservedRunningTime="2025-11-21 19:36:04.985529007 +0000 UTC m=+2055.770669034" Nov 21 19:36:18 crc kubenswrapper[4701]: I1121 19:36:18.613957 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 19:36:18 crc kubenswrapper[4701]: I1121 19:36:18.615095 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 19:36:18 crc 
kubenswrapper[4701]: I1121 19:36:18.615184 4701 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" Nov 21 19:36:18 crc kubenswrapper[4701]: I1121 19:36:18.616663 4701 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7fbc56daee349295117a0bd71dcbc8f65f0cba0ed64444e6d442311c3039877d"} pod="openshift-machine-config-operator/machine-config-daemon-tbszf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 19:36:18 crc kubenswrapper[4701]: I1121 19:36:18.616968 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" containerID="cri-o://7fbc56daee349295117a0bd71dcbc8f65f0cba0ed64444e6d442311c3039877d" gracePeriod=600 Nov 21 19:36:19 crc kubenswrapper[4701]: I1121 19:36:19.146541 4701 generic.go:334] "Generic (PLEG): container finished" podID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerID="7fbc56daee349295117a0bd71dcbc8f65f0cba0ed64444e6d442311c3039877d" exitCode=0 Nov 21 19:36:19 crc kubenswrapper[4701]: I1121 19:36:19.146616 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerDied","Data":"7fbc56daee349295117a0bd71dcbc8f65f0cba0ed64444e6d442311c3039877d"} Nov 21 19:36:19 crc kubenswrapper[4701]: I1121 19:36:19.146988 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerStarted","Data":"9293b5a2479f8a37b25cfffe8c63f12e41d202a81f7c28415703d6e5f527b560"} Nov 21 19:36:19 crc kubenswrapper[4701]: I1121 19:36:19.147012 4701 scope.go:117] "RemoveContainer" containerID="081447c4e1559ffc58e33e136a78fa7b343ef9791f855a491b27a72a49d8cde6" Nov 21 19:36:27 crc kubenswrapper[4701]: I1121 19:36:27.329786 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-bk2mz"] Nov 21 19:36:27 crc kubenswrapper[4701]: I1121 19:36:27.333597 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-bk2mz" Nov 21 19:36:27 crc kubenswrapper[4701]: I1121 19:36:27.351891 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bk2mz"] Nov 21 19:36:27 crc kubenswrapper[4701]: I1121 19:36:27.504669 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f1e0703a-adea-4efb-9ba4-d8b916e4ef2b-utilities\") pod \"certified-operators-bk2mz\" (UID: \"f1e0703a-adea-4efb-9ba4-d8b916e4ef2b\") " pod="openshift-marketplace/certified-operators-bk2mz" Nov 21 19:36:27 crc kubenswrapper[4701]: I1121 19:36:27.504883 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5rbzb\" (UniqueName: \"kubernetes.io/projected/f1e0703a-adea-4efb-9ba4-d8b916e4ef2b-kube-api-access-5rbzb\") pod \"certified-operators-bk2mz\" (UID: \"f1e0703a-adea-4efb-9ba4-d8b916e4ef2b\") " pod="openshift-marketplace/certified-operators-bk2mz" Nov 21 19:36:27 crc kubenswrapper[4701]: I1121 19:36:27.504950 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f1e0703a-adea-4efb-9ba4-d8b916e4ef2b-catalog-content\") pod \"certified-operators-bk2mz\" (UID: \"f1e0703a-adea-4efb-9ba4-d8b916e4ef2b\") " pod="openshift-marketplace/certified-operators-bk2mz" Nov 21 19:36:27 crc kubenswrapper[4701]: I1121 19:36:27.606871 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f1e0703a-adea-4efb-9ba4-d8b916e4ef2b-catalog-content\") pod \"certified-operators-bk2mz\" (UID: \"f1e0703a-adea-4efb-9ba4-d8b916e4ef2b\") " pod="openshift-marketplace/certified-operators-bk2mz" Nov 21 19:36:27 crc kubenswrapper[4701]: I1121 19:36:27.607010 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f1e0703a-adea-4efb-9ba4-d8b916e4ef2b-utilities\") pod \"certified-operators-bk2mz\" (UID: \"f1e0703a-adea-4efb-9ba4-d8b916e4ef2b\") " pod="openshift-marketplace/certified-operators-bk2mz" Nov 21 19:36:27 crc kubenswrapper[4701]: I1121 19:36:27.607115 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5rbzb\" (UniqueName: \"kubernetes.io/projected/f1e0703a-adea-4efb-9ba4-d8b916e4ef2b-kube-api-access-5rbzb\") pod \"certified-operators-bk2mz\" (UID: \"f1e0703a-adea-4efb-9ba4-d8b916e4ef2b\") " pod="openshift-marketplace/certified-operators-bk2mz" Nov 21 19:36:27 crc kubenswrapper[4701]: I1121 19:36:27.607832 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f1e0703a-adea-4efb-9ba4-d8b916e4ef2b-catalog-content\") pod \"certified-operators-bk2mz\" (UID: \"f1e0703a-adea-4efb-9ba4-d8b916e4ef2b\") " pod="openshift-marketplace/certified-operators-bk2mz" Nov 21 19:36:27 crc kubenswrapper[4701]: I1121 19:36:27.607843 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f1e0703a-adea-4efb-9ba4-d8b916e4ef2b-utilities\") pod \"certified-operators-bk2mz\" (UID: \"f1e0703a-adea-4efb-9ba4-d8b916e4ef2b\") " pod="openshift-marketplace/certified-operators-bk2mz" Nov 21 19:36:27 crc kubenswrapper[4701]: I1121 19:36:27.633726 4701 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-5rbzb\" (UniqueName: \"kubernetes.io/projected/f1e0703a-adea-4efb-9ba4-d8b916e4ef2b-kube-api-access-5rbzb\") pod \"certified-operators-bk2mz\" (UID: \"f1e0703a-adea-4efb-9ba4-d8b916e4ef2b\") " pod="openshift-marketplace/certified-operators-bk2mz" Nov 21 19:36:27 crc kubenswrapper[4701]: I1121 19:36:27.709893 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bk2mz" Nov 21 19:36:28 crc kubenswrapper[4701]: I1121 19:36:28.266810 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bk2mz"] Nov 21 19:36:28 crc kubenswrapper[4701]: I1121 19:36:28.303554 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bk2mz" event={"ID":"f1e0703a-adea-4efb-9ba4-d8b916e4ef2b","Type":"ContainerStarted","Data":"facf0dac39bdb6505f2c3ebdedcdbcf459d206278f9a07689eb29ecf84c30a79"} Nov 21 19:36:29 crc kubenswrapper[4701]: I1121 19:36:29.320971 4701 generic.go:334] "Generic (PLEG): container finished" podID="f1e0703a-adea-4efb-9ba4-d8b916e4ef2b" containerID="3d433f4ca80177ac26302bae2699f0990b72ae8a7e0770a355b473a93a6b44fd" exitCode=0 Nov 21 19:36:29 crc kubenswrapper[4701]: I1121 19:36:29.321820 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bk2mz" event={"ID":"f1e0703a-adea-4efb-9ba4-d8b916e4ef2b","Type":"ContainerDied","Data":"3d433f4ca80177ac26302bae2699f0990b72ae8a7e0770a355b473a93a6b44fd"} Nov 21 19:36:30 crc kubenswrapper[4701]: I1121 19:36:30.338327 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bk2mz" event={"ID":"f1e0703a-adea-4efb-9ba4-d8b916e4ef2b","Type":"ContainerStarted","Data":"71d73ef5bf731fcdcb306ad61e2c691157e43a08e09cb0ec6cae3ce060488e25"} Nov 21 19:36:32 crc kubenswrapper[4701]: I1121 19:36:32.365815 4701 generic.go:334] "Generic (PLEG): container finished" podID="f1e0703a-adea-4efb-9ba4-d8b916e4ef2b" containerID="71d73ef5bf731fcdcb306ad61e2c691157e43a08e09cb0ec6cae3ce060488e25" exitCode=0 Nov 21 19:36:32 crc kubenswrapper[4701]: I1121 19:36:32.365933 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bk2mz" event={"ID":"f1e0703a-adea-4efb-9ba4-d8b916e4ef2b","Type":"ContainerDied","Data":"71d73ef5bf731fcdcb306ad61e2c691157e43a08e09cb0ec6cae3ce060488e25"} Nov 21 19:36:33 crc kubenswrapper[4701]: I1121 19:36:33.390145 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bk2mz" event={"ID":"f1e0703a-adea-4efb-9ba4-d8b916e4ef2b","Type":"ContainerStarted","Data":"63f4a887fbf9b29fb4133dcf19af34a28810750d5977532800f135486ca5a578"} Nov 21 19:36:33 crc kubenswrapper[4701]: I1121 19:36:33.453091 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-bk2mz" podStartSLOduration=3.020382512 podStartE2EDuration="6.453054365s" podCreationTimestamp="2025-11-21 19:36:27 +0000 UTC" firstStartedPulling="2025-11-21 19:36:29.32618617 +0000 UTC m=+2080.111326207" lastFinishedPulling="2025-11-21 19:36:32.758858023 +0000 UTC m=+2083.543998060" observedRunningTime="2025-11-21 19:36:33.431891487 +0000 UTC m=+2084.217031514" watchObservedRunningTime="2025-11-21 19:36:33.453054365 +0000 UTC m=+2084.238194402" Nov 21 19:36:37 crc kubenswrapper[4701]: I1121 19:36:37.710195 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/certified-operators-bk2mz" Nov 21 19:36:37 crc kubenswrapper[4701]: I1121 19:36:37.710967 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-bk2mz" Nov 21 19:36:37 crc kubenswrapper[4701]: I1121 19:36:37.796566 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-bk2mz" Nov 21 19:36:38 crc kubenswrapper[4701]: I1121 19:36:38.563407 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-bk2mz" Nov 21 19:36:38 crc kubenswrapper[4701]: I1121 19:36:38.639754 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-bk2mz"] Nov 21 19:36:40 crc kubenswrapper[4701]: I1121 19:36:40.487130 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-bk2mz" podUID="f1e0703a-adea-4efb-9ba4-d8b916e4ef2b" containerName="registry-server" containerID="cri-o://63f4a887fbf9b29fb4133dcf19af34a28810750d5977532800f135486ca5a578" gracePeriod=2 Nov 21 19:36:41 crc kubenswrapper[4701]: I1121 19:36:41.031714 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bk2mz" Nov 21 19:36:41 crc kubenswrapper[4701]: I1121 19:36:41.100441 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f1e0703a-adea-4efb-9ba4-d8b916e4ef2b-utilities\") pod \"f1e0703a-adea-4efb-9ba4-d8b916e4ef2b\" (UID: \"f1e0703a-adea-4efb-9ba4-d8b916e4ef2b\") " Nov 21 19:36:41 crc kubenswrapper[4701]: I1121 19:36:41.100862 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5rbzb\" (UniqueName: \"kubernetes.io/projected/f1e0703a-adea-4efb-9ba4-d8b916e4ef2b-kube-api-access-5rbzb\") pod \"f1e0703a-adea-4efb-9ba4-d8b916e4ef2b\" (UID: \"f1e0703a-adea-4efb-9ba4-d8b916e4ef2b\") " Nov 21 19:36:41 crc kubenswrapper[4701]: I1121 19:36:41.100979 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f1e0703a-adea-4efb-9ba4-d8b916e4ef2b-catalog-content\") pod \"f1e0703a-adea-4efb-9ba4-d8b916e4ef2b\" (UID: \"f1e0703a-adea-4efb-9ba4-d8b916e4ef2b\") " Nov 21 19:36:41 crc kubenswrapper[4701]: I1121 19:36:41.103067 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f1e0703a-adea-4efb-9ba4-d8b916e4ef2b-utilities" (OuterVolumeSpecName: "utilities") pod "f1e0703a-adea-4efb-9ba4-d8b916e4ef2b" (UID: "f1e0703a-adea-4efb-9ba4-d8b916e4ef2b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:36:41 crc kubenswrapper[4701]: I1121 19:36:41.110317 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f1e0703a-adea-4efb-9ba4-d8b916e4ef2b-kube-api-access-5rbzb" (OuterVolumeSpecName: "kube-api-access-5rbzb") pod "f1e0703a-adea-4efb-9ba4-d8b916e4ef2b" (UID: "f1e0703a-adea-4efb-9ba4-d8b916e4ef2b"). InnerVolumeSpecName "kube-api-access-5rbzb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:36:41 crc kubenswrapper[4701]: I1121 19:36:41.204234 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5rbzb\" (UniqueName: \"kubernetes.io/projected/f1e0703a-adea-4efb-9ba4-d8b916e4ef2b-kube-api-access-5rbzb\") on node \"crc\" DevicePath \"\"" Nov 21 19:36:41 crc kubenswrapper[4701]: I1121 19:36:41.204275 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f1e0703a-adea-4efb-9ba4-d8b916e4ef2b-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 19:36:41 crc kubenswrapper[4701]: I1121 19:36:41.238145 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f1e0703a-adea-4efb-9ba4-d8b916e4ef2b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f1e0703a-adea-4efb-9ba4-d8b916e4ef2b" (UID: "f1e0703a-adea-4efb-9ba4-d8b916e4ef2b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:36:41 crc kubenswrapper[4701]: I1121 19:36:41.306340 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f1e0703a-adea-4efb-9ba4-d8b916e4ef2b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 19:36:41 crc kubenswrapper[4701]: I1121 19:36:41.504605 4701 generic.go:334] "Generic (PLEG): container finished" podID="f1e0703a-adea-4efb-9ba4-d8b916e4ef2b" containerID="63f4a887fbf9b29fb4133dcf19af34a28810750d5977532800f135486ca5a578" exitCode=0 Nov 21 19:36:41 crc kubenswrapper[4701]: I1121 19:36:41.504674 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bk2mz" event={"ID":"f1e0703a-adea-4efb-9ba4-d8b916e4ef2b","Type":"ContainerDied","Data":"63f4a887fbf9b29fb4133dcf19af34a28810750d5977532800f135486ca5a578"} Nov 21 19:36:41 crc kubenswrapper[4701]: I1121 19:36:41.504734 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bk2mz" event={"ID":"f1e0703a-adea-4efb-9ba4-d8b916e4ef2b","Type":"ContainerDied","Data":"facf0dac39bdb6505f2c3ebdedcdbcf459d206278f9a07689eb29ecf84c30a79"} Nov 21 19:36:41 crc kubenswrapper[4701]: I1121 19:36:41.504763 4701 scope.go:117] "RemoveContainer" containerID="63f4a887fbf9b29fb4133dcf19af34a28810750d5977532800f135486ca5a578" Nov 21 19:36:41 crc kubenswrapper[4701]: I1121 19:36:41.504818 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-bk2mz" Nov 21 19:36:41 crc kubenswrapper[4701]: I1121 19:36:41.569720 4701 scope.go:117] "RemoveContainer" containerID="71d73ef5bf731fcdcb306ad61e2c691157e43a08e09cb0ec6cae3ce060488e25" Nov 21 19:36:41 crc kubenswrapper[4701]: I1121 19:36:41.571679 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-bk2mz"] Nov 21 19:36:41 crc kubenswrapper[4701]: I1121 19:36:41.587701 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-bk2mz"] Nov 21 19:36:41 crc kubenswrapper[4701]: I1121 19:36:41.618561 4701 scope.go:117] "RemoveContainer" containerID="3d433f4ca80177ac26302bae2699f0990b72ae8a7e0770a355b473a93a6b44fd" Nov 21 19:36:41 crc kubenswrapper[4701]: I1121 19:36:41.692282 4701 scope.go:117] "RemoveContainer" containerID="63f4a887fbf9b29fb4133dcf19af34a28810750d5977532800f135486ca5a578" Nov 21 19:36:41 crc kubenswrapper[4701]: E1121 19:36:41.692905 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"63f4a887fbf9b29fb4133dcf19af34a28810750d5977532800f135486ca5a578\": container with ID starting with 63f4a887fbf9b29fb4133dcf19af34a28810750d5977532800f135486ca5a578 not found: ID does not exist" containerID="63f4a887fbf9b29fb4133dcf19af34a28810750d5977532800f135486ca5a578" Nov 21 19:36:41 crc kubenswrapper[4701]: I1121 19:36:41.692952 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"63f4a887fbf9b29fb4133dcf19af34a28810750d5977532800f135486ca5a578"} err="failed to get container status \"63f4a887fbf9b29fb4133dcf19af34a28810750d5977532800f135486ca5a578\": rpc error: code = NotFound desc = could not find container \"63f4a887fbf9b29fb4133dcf19af34a28810750d5977532800f135486ca5a578\": container with ID starting with 63f4a887fbf9b29fb4133dcf19af34a28810750d5977532800f135486ca5a578 not found: ID does not exist" Nov 21 19:36:41 crc kubenswrapper[4701]: I1121 19:36:41.692990 4701 scope.go:117] "RemoveContainer" containerID="71d73ef5bf731fcdcb306ad61e2c691157e43a08e09cb0ec6cae3ce060488e25" Nov 21 19:36:41 crc kubenswrapper[4701]: E1121 19:36:41.693809 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"71d73ef5bf731fcdcb306ad61e2c691157e43a08e09cb0ec6cae3ce060488e25\": container with ID starting with 71d73ef5bf731fcdcb306ad61e2c691157e43a08e09cb0ec6cae3ce060488e25 not found: ID does not exist" containerID="71d73ef5bf731fcdcb306ad61e2c691157e43a08e09cb0ec6cae3ce060488e25" Nov 21 19:36:41 crc kubenswrapper[4701]: I1121 19:36:41.693854 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"71d73ef5bf731fcdcb306ad61e2c691157e43a08e09cb0ec6cae3ce060488e25"} err="failed to get container status \"71d73ef5bf731fcdcb306ad61e2c691157e43a08e09cb0ec6cae3ce060488e25\": rpc error: code = NotFound desc = could not find container \"71d73ef5bf731fcdcb306ad61e2c691157e43a08e09cb0ec6cae3ce060488e25\": container with ID starting with 71d73ef5bf731fcdcb306ad61e2c691157e43a08e09cb0ec6cae3ce060488e25 not found: ID does not exist" Nov 21 19:36:41 crc kubenswrapper[4701]: I1121 19:36:41.693879 4701 scope.go:117] "RemoveContainer" containerID="3d433f4ca80177ac26302bae2699f0990b72ae8a7e0770a355b473a93a6b44fd" Nov 21 19:36:41 crc kubenswrapper[4701]: E1121 19:36:41.694425 4701 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"3d433f4ca80177ac26302bae2699f0990b72ae8a7e0770a355b473a93a6b44fd\": container with ID starting with 3d433f4ca80177ac26302bae2699f0990b72ae8a7e0770a355b473a93a6b44fd not found: ID does not exist" containerID="3d433f4ca80177ac26302bae2699f0990b72ae8a7e0770a355b473a93a6b44fd" Nov 21 19:36:41 crc kubenswrapper[4701]: I1121 19:36:41.694484 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3d433f4ca80177ac26302bae2699f0990b72ae8a7e0770a355b473a93a6b44fd"} err="failed to get container status \"3d433f4ca80177ac26302bae2699f0990b72ae8a7e0770a355b473a93a6b44fd\": rpc error: code = NotFound desc = could not find container \"3d433f4ca80177ac26302bae2699f0990b72ae8a7e0770a355b473a93a6b44fd\": container with ID starting with 3d433f4ca80177ac26302bae2699f0990b72ae8a7e0770a355b473a93a6b44fd not found: ID does not exist" Nov 21 19:36:41 crc kubenswrapper[4701]: I1121 19:36:41.970750 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f1e0703a-adea-4efb-9ba4-d8b916e4ef2b" path="/var/lib/kubelet/pods/f1e0703a-adea-4efb-9ba4-d8b916e4ef2b/volumes" Nov 21 19:37:25 crc kubenswrapper[4701]: I1121 19:37:25.084762 4701 generic.go:334] "Generic (PLEG): container finished" podID="640fcd44-4a2e-475b-b296-5f37ac6d55e7" containerID="b3aa7209b203a5da644976a3607d986688a5257c917bc8d5b5a73b63b86377e6" exitCode=0 Nov 21 19:37:25 crc kubenswrapper[4701]: I1121 19:37:25.085240 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rdjpc" event={"ID":"640fcd44-4a2e-475b-b296-5f37ac6d55e7","Type":"ContainerDied","Data":"b3aa7209b203a5da644976a3607d986688a5257c917bc8d5b5a73b63b86377e6"} Nov 21 19:37:26 crc kubenswrapper[4701]: I1121 19:37:26.675193 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rdjpc" Nov 21 19:37:26 crc kubenswrapper[4701]: I1121 19:37:26.757596 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w28mt\" (UniqueName: \"kubernetes.io/projected/640fcd44-4a2e-475b-b296-5f37ac6d55e7-kube-api-access-w28mt\") pod \"640fcd44-4a2e-475b-b296-5f37ac6d55e7\" (UID: \"640fcd44-4a2e-475b-b296-5f37ac6d55e7\") " Nov 21 19:37:26 crc kubenswrapper[4701]: I1121 19:37:26.757665 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/640fcd44-4a2e-475b-b296-5f37ac6d55e7-ovncontroller-config-0\") pod \"640fcd44-4a2e-475b-b296-5f37ac6d55e7\" (UID: \"640fcd44-4a2e-475b-b296-5f37ac6d55e7\") " Nov 21 19:37:26 crc kubenswrapper[4701]: I1121 19:37:26.757800 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/640fcd44-4a2e-475b-b296-5f37ac6d55e7-ovn-combined-ca-bundle\") pod \"640fcd44-4a2e-475b-b296-5f37ac6d55e7\" (UID: \"640fcd44-4a2e-475b-b296-5f37ac6d55e7\") " Nov 21 19:37:26 crc kubenswrapper[4701]: I1121 19:37:26.758046 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/640fcd44-4a2e-475b-b296-5f37ac6d55e7-inventory\") pod \"640fcd44-4a2e-475b-b296-5f37ac6d55e7\" (UID: \"640fcd44-4a2e-475b-b296-5f37ac6d55e7\") " Nov 21 19:37:26 crc kubenswrapper[4701]: I1121 19:37:26.758082 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/640fcd44-4a2e-475b-b296-5f37ac6d55e7-ssh-key\") pod \"640fcd44-4a2e-475b-b296-5f37ac6d55e7\" (UID: \"640fcd44-4a2e-475b-b296-5f37ac6d55e7\") " Nov 21 19:37:26 crc kubenswrapper[4701]: I1121 19:37:26.767789 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/640fcd44-4a2e-475b-b296-5f37ac6d55e7-kube-api-access-w28mt" (OuterVolumeSpecName: "kube-api-access-w28mt") pod "640fcd44-4a2e-475b-b296-5f37ac6d55e7" (UID: "640fcd44-4a2e-475b-b296-5f37ac6d55e7"). InnerVolumeSpecName "kube-api-access-w28mt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:37:26 crc kubenswrapper[4701]: I1121 19:37:26.768537 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/640fcd44-4a2e-475b-b296-5f37ac6d55e7-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "640fcd44-4a2e-475b-b296-5f37ac6d55e7" (UID: "640fcd44-4a2e-475b-b296-5f37ac6d55e7"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:37:26 crc kubenswrapper[4701]: I1121 19:37:26.802428 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/640fcd44-4a2e-475b-b296-5f37ac6d55e7-inventory" (OuterVolumeSpecName: "inventory") pod "640fcd44-4a2e-475b-b296-5f37ac6d55e7" (UID: "640fcd44-4a2e-475b-b296-5f37ac6d55e7"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:37:26 crc kubenswrapper[4701]: I1121 19:37:26.817252 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/640fcd44-4a2e-475b-b296-5f37ac6d55e7-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "640fcd44-4a2e-475b-b296-5f37ac6d55e7" (UID: "640fcd44-4a2e-475b-b296-5f37ac6d55e7"). InnerVolumeSpecName "ovncontroller-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:37:26 crc kubenswrapper[4701]: I1121 19:37:26.830423 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/640fcd44-4a2e-475b-b296-5f37ac6d55e7-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "640fcd44-4a2e-475b-b296-5f37ac6d55e7" (UID: "640fcd44-4a2e-475b-b296-5f37ac6d55e7"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:37:26 crc kubenswrapper[4701]: I1121 19:37:26.862055 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w28mt\" (UniqueName: \"kubernetes.io/projected/640fcd44-4a2e-475b-b296-5f37ac6d55e7-kube-api-access-w28mt\") on node \"crc\" DevicePath \"\"" Nov 21 19:37:26 crc kubenswrapper[4701]: I1121 19:37:26.862101 4701 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/640fcd44-4a2e-475b-b296-5f37ac6d55e7-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Nov 21 19:37:26 crc kubenswrapper[4701]: I1121 19:37:26.862114 4701 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/640fcd44-4a2e-475b-b296-5f37ac6d55e7-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:37:26 crc kubenswrapper[4701]: I1121 19:37:26.862126 4701 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/640fcd44-4a2e-475b-b296-5f37ac6d55e7-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 19:37:26 crc kubenswrapper[4701]: I1121 19:37:26.862137 4701 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/640fcd44-4a2e-475b-b296-5f37ac6d55e7-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 19:37:27 crc kubenswrapper[4701]: I1121 19:37:27.119578 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rdjpc" event={"ID":"640fcd44-4a2e-475b-b296-5f37ac6d55e7","Type":"ContainerDied","Data":"b83bdaa74b5bc3688dcbf3b251d2a472561e44dba876faa43f24937d4e1cd664"} Nov 21 19:37:27 crc kubenswrapper[4701]: I1121 19:37:27.119640 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b83bdaa74b5bc3688dcbf3b251d2a472561e44dba876faa43f24937d4e1cd664" Nov 21 19:37:27 crc kubenswrapper[4701]: I1121 19:37:27.119611 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rdjpc" Nov 21 19:37:27 crc kubenswrapper[4701]: I1121 19:37:27.263137 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5"] Nov 21 19:37:27 crc kubenswrapper[4701]: E1121 19:37:27.264123 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1e0703a-adea-4efb-9ba4-d8b916e4ef2b" containerName="registry-server" Nov 21 19:37:27 crc kubenswrapper[4701]: I1121 19:37:27.264301 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1e0703a-adea-4efb-9ba4-d8b916e4ef2b" containerName="registry-server" Nov 21 19:37:27 crc kubenswrapper[4701]: E1121 19:37:27.264394 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1e0703a-adea-4efb-9ba4-d8b916e4ef2b" containerName="extract-content" Nov 21 19:37:27 crc kubenswrapper[4701]: I1121 19:37:27.264470 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1e0703a-adea-4efb-9ba4-d8b916e4ef2b" containerName="extract-content" Nov 21 19:37:27 crc kubenswrapper[4701]: E1121 19:37:27.264607 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1e0703a-adea-4efb-9ba4-d8b916e4ef2b" containerName="extract-utilities" Nov 21 19:37:27 crc kubenswrapper[4701]: I1121 19:37:27.264694 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1e0703a-adea-4efb-9ba4-d8b916e4ef2b" containerName="extract-utilities" Nov 21 19:37:27 crc kubenswrapper[4701]: E1121 19:37:27.264804 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="640fcd44-4a2e-475b-b296-5f37ac6d55e7" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 21 19:37:27 crc kubenswrapper[4701]: I1121 19:37:27.264924 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="640fcd44-4a2e-475b-b296-5f37ac6d55e7" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 21 19:37:27 crc kubenswrapper[4701]: I1121 19:37:27.265290 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1e0703a-adea-4efb-9ba4-d8b916e4ef2b" containerName="registry-server" Nov 21 19:37:27 crc kubenswrapper[4701]: I1121 19:37:27.265424 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="640fcd44-4a2e-475b-b296-5f37ac6d55e7" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 21 19:37:27 crc kubenswrapper[4701]: I1121 19:37:27.266662 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5" Nov 21 19:37:27 crc kubenswrapper[4701]: I1121 19:37:27.273150 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 19:37:27 crc kubenswrapper[4701]: I1121 19:37:27.273306 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Nov 21 19:37:27 crc kubenswrapper[4701]: I1121 19:37:27.273345 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 19:37:27 crc kubenswrapper[4701]: I1121 19:37:27.273592 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 19:37:27 crc kubenswrapper[4701]: I1121 19:37:27.273838 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Nov 21 19:37:27 crc kubenswrapper[4701]: I1121 19:37:27.273851 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hqsfp" Nov 21 19:37:27 crc kubenswrapper[4701]: I1121 19:37:27.301410 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5"] Nov 21 19:37:27 crc kubenswrapper[4701]: I1121 19:37:27.375263 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/44f05c88-6707-4ca9-a248-d5abc8ae5850-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5\" (UID: \"44f05c88-6707-4ca9-a248-d5abc8ae5850\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5" Nov 21 19:37:27 crc kubenswrapper[4701]: I1121 19:37:27.375375 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/44f05c88-6707-4ca9-a248-d5abc8ae5850-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5\" (UID: \"44f05c88-6707-4ca9-a248-d5abc8ae5850\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5" Nov 21 19:37:27 crc kubenswrapper[4701]: I1121 19:37:27.375487 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44f05c88-6707-4ca9-a248-d5abc8ae5850-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5\" (UID: \"44f05c88-6707-4ca9-a248-d5abc8ae5850\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5" Nov 21 19:37:27 crc kubenswrapper[4701]: I1121 19:37:27.375693 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nhrx4\" (UniqueName: \"kubernetes.io/projected/44f05c88-6707-4ca9-a248-d5abc8ae5850-kube-api-access-nhrx4\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5\" (UID: \"44f05c88-6707-4ca9-a248-d5abc8ae5850\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5" Nov 21 19:37:27 crc kubenswrapper[4701]: I1121 19:37:27.375743 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"ssh-key\" (UniqueName: \"kubernetes.io/secret/44f05c88-6707-4ca9-a248-d5abc8ae5850-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5\" (UID: \"44f05c88-6707-4ca9-a248-d5abc8ae5850\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5" Nov 21 19:37:27 crc kubenswrapper[4701]: I1121 19:37:27.375784 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/44f05c88-6707-4ca9-a248-d5abc8ae5850-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5\" (UID: \"44f05c88-6707-4ca9-a248-d5abc8ae5850\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5" Nov 21 19:37:27 crc kubenswrapper[4701]: I1121 19:37:27.477841 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/44f05c88-6707-4ca9-a248-d5abc8ae5850-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5\" (UID: \"44f05c88-6707-4ca9-a248-d5abc8ae5850\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5" Nov 21 19:37:27 crc kubenswrapper[4701]: I1121 19:37:27.477910 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/44f05c88-6707-4ca9-a248-d5abc8ae5850-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5\" (UID: \"44f05c88-6707-4ca9-a248-d5abc8ae5850\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5" Nov 21 19:37:27 crc kubenswrapper[4701]: I1121 19:37:27.477972 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44f05c88-6707-4ca9-a248-d5abc8ae5850-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5\" (UID: \"44f05c88-6707-4ca9-a248-d5abc8ae5850\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5" Nov 21 19:37:27 crc kubenswrapper[4701]: I1121 19:37:27.478143 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nhrx4\" (UniqueName: \"kubernetes.io/projected/44f05c88-6707-4ca9-a248-d5abc8ae5850-kube-api-access-nhrx4\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5\" (UID: \"44f05c88-6707-4ca9-a248-d5abc8ae5850\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5" Nov 21 19:37:27 crc kubenswrapper[4701]: I1121 19:37:27.478216 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/44f05c88-6707-4ca9-a248-d5abc8ae5850-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5\" (UID: \"44f05c88-6707-4ca9-a248-d5abc8ae5850\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5" Nov 21 19:37:27 crc kubenswrapper[4701]: I1121 19:37:27.478254 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/44f05c88-6707-4ca9-a248-d5abc8ae5850-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5\" (UID: \"44f05c88-6707-4ca9-a248-d5abc8ae5850\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5" Nov 21 
19:37:27 crc kubenswrapper[4701]: I1121 19:37:27.482261 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44f05c88-6707-4ca9-a248-d5abc8ae5850-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5\" (UID: \"44f05c88-6707-4ca9-a248-d5abc8ae5850\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5" Nov 21 19:37:27 crc kubenswrapper[4701]: I1121 19:37:27.484217 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/44f05c88-6707-4ca9-a248-d5abc8ae5850-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5\" (UID: \"44f05c88-6707-4ca9-a248-d5abc8ae5850\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5" Nov 21 19:37:27 crc kubenswrapper[4701]: I1121 19:37:27.485470 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/44f05c88-6707-4ca9-a248-d5abc8ae5850-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5\" (UID: \"44f05c88-6707-4ca9-a248-d5abc8ae5850\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5" Nov 21 19:37:27 crc kubenswrapper[4701]: I1121 19:37:27.488022 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/44f05c88-6707-4ca9-a248-d5abc8ae5850-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5\" (UID: \"44f05c88-6707-4ca9-a248-d5abc8ae5850\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5" Nov 21 19:37:27 crc kubenswrapper[4701]: I1121 19:37:27.491255 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/44f05c88-6707-4ca9-a248-d5abc8ae5850-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5\" (UID: \"44f05c88-6707-4ca9-a248-d5abc8ae5850\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5" Nov 21 19:37:27 crc kubenswrapper[4701]: I1121 19:37:27.502401 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nhrx4\" (UniqueName: \"kubernetes.io/projected/44f05c88-6707-4ca9-a248-d5abc8ae5850-kube-api-access-nhrx4\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5\" (UID: \"44f05c88-6707-4ca9-a248-d5abc8ae5850\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5" Nov 21 19:37:27 crc kubenswrapper[4701]: I1121 19:37:27.599593 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5" Nov 21 19:37:28 crc kubenswrapper[4701]: I1121 19:37:28.058193 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5"] Nov 21 19:37:28 crc kubenswrapper[4701]: I1121 19:37:28.132601 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5" event={"ID":"44f05c88-6707-4ca9-a248-d5abc8ae5850","Type":"ContainerStarted","Data":"e8f55411adf77dc81f996503d007743ae0740304fbbd98efecba8be80476f352"} Nov 21 19:37:29 crc kubenswrapper[4701]: I1121 19:37:29.142931 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5" event={"ID":"44f05c88-6707-4ca9-a248-d5abc8ae5850","Type":"ContainerStarted","Data":"dcd35b71cacdae5e918ba9631f16f2435d0d19ecd8a44ec44d41e8863e619a86"} Nov 21 19:37:29 crc kubenswrapper[4701]: I1121 19:37:29.193274 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5" podStartSLOduration=1.8076865469999999 podStartE2EDuration="2.19323102s" podCreationTimestamp="2025-11-21 19:37:27 +0000 UTC" firstStartedPulling="2025-11-21 19:37:28.060510996 +0000 UTC m=+2138.845651023" lastFinishedPulling="2025-11-21 19:37:28.446055429 +0000 UTC m=+2139.231195496" observedRunningTime="2025-11-21 19:37:29.172005939 +0000 UTC m=+2139.957145986" watchObservedRunningTime="2025-11-21 19:37:29.19323102 +0000 UTC m=+2139.978371077" Nov 21 19:38:26 crc kubenswrapper[4701]: I1121 19:38:26.120450 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-kw8dh"] Nov 21 19:38:26 crc kubenswrapper[4701]: I1121 19:38:26.123398 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kw8dh" Nov 21 19:38:26 crc kubenswrapper[4701]: I1121 19:38:26.131458 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kw8dh"] Nov 21 19:38:26 crc kubenswrapper[4701]: I1121 19:38:26.208559 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b77fde64-e89c-4969-9f47-a21d4df4f93d-catalog-content\") pod \"redhat-marketplace-kw8dh\" (UID: \"b77fde64-e89c-4969-9f47-a21d4df4f93d\") " pod="openshift-marketplace/redhat-marketplace-kw8dh" Nov 21 19:38:26 crc kubenswrapper[4701]: I1121 19:38:26.208701 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b77fde64-e89c-4969-9f47-a21d4df4f93d-utilities\") pod \"redhat-marketplace-kw8dh\" (UID: \"b77fde64-e89c-4969-9f47-a21d4df4f93d\") " pod="openshift-marketplace/redhat-marketplace-kw8dh" Nov 21 19:38:26 crc kubenswrapper[4701]: I1121 19:38:26.208735 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mdzwr\" (UniqueName: \"kubernetes.io/projected/b77fde64-e89c-4969-9f47-a21d4df4f93d-kube-api-access-mdzwr\") pod \"redhat-marketplace-kw8dh\" (UID: \"b77fde64-e89c-4969-9f47-a21d4df4f93d\") " pod="openshift-marketplace/redhat-marketplace-kw8dh" Nov 21 19:38:26 crc kubenswrapper[4701]: I1121 19:38:26.310333 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b77fde64-e89c-4969-9f47-a21d4df4f93d-utilities\") pod \"redhat-marketplace-kw8dh\" (UID: \"b77fde64-e89c-4969-9f47-a21d4df4f93d\") " pod="openshift-marketplace/redhat-marketplace-kw8dh" Nov 21 19:38:26 crc kubenswrapper[4701]: I1121 19:38:26.310617 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mdzwr\" (UniqueName: \"kubernetes.io/projected/b77fde64-e89c-4969-9f47-a21d4df4f93d-kube-api-access-mdzwr\") pod \"redhat-marketplace-kw8dh\" (UID: \"b77fde64-e89c-4969-9f47-a21d4df4f93d\") " pod="openshift-marketplace/redhat-marketplace-kw8dh" Nov 21 19:38:26 crc kubenswrapper[4701]: I1121 19:38:26.310776 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b77fde64-e89c-4969-9f47-a21d4df4f93d-utilities\") pod \"redhat-marketplace-kw8dh\" (UID: \"b77fde64-e89c-4969-9f47-a21d4df4f93d\") " pod="openshift-marketplace/redhat-marketplace-kw8dh" Nov 21 19:38:26 crc kubenswrapper[4701]: I1121 19:38:26.310853 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b77fde64-e89c-4969-9f47-a21d4df4f93d-catalog-content\") pod \"redhat-marketplace-kw8dh\" (UID: \"b77fde64-e89c-4969-9f47-a21d4df4f93d\") " pod="openshift-marketplace/redhat-marketplace-kw8dh" Nov 21 19:38:26 crc kubenswrapper[4701]: I1121 19:38:26.311065 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b77fde64-e89c-4969-9f47-a21d4df4f93d-catalog-content\") pod \"redhat-marketplace-kw8dh\" (UID: \"b77fde64-e89c-4969-9f47-a21d4df4f93d\") " pod="openshift-marketplace/redhat-marketplace-kw8dh" Nov 21 19:38:26 crc kubenswrapper[4701]: I1121 19:38:26.343809 4701 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-mdzwr\" (UniqueName: \"kubernetes.io/projected/b77fde64-e89c-4969-9f47-a21d4df4f93d-kube-api-access-mdzwr\") pod \"redhat-marketplace-kw8dh\" (UID: \"b77fde64-e89c-4969-9f47-a21d4df4f93d\") " pod="openshift-marketplace/redhat-marketplace-kw8dh" Nov 21 19:38:26 crc kubenswrapper[4701]: I1121 19:38:26.451771 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kw8dh" Nov 21 19:38:26 crc kubenswrapper[4701]: I1121 19:38:26.980050 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kw8dh"] Nov 21 19:38:26 crc kubenswrapper[4701]: W1121 19:38:26.993918 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb77fde64_e89c_4969_9f47_a21d4df4f93d.slice/crio-c5cf4fdf73edd8bc4a64fb30cf3253929f633368882834258134fa339030cc9a WatchSource:0}: Error finding container c5cf4fdf73edd8bc4a64fb30cf3253929f633368882834258134fa339030cc9a: Status 404 returned error can't find the container with id c5cf4fdf73edd8bc4a64fb30cf3253929f633368882834258134fa339030cc9a Nov 21 19:38:27 crc kubenswrapper[4701]: I1121 19:38:27.917961 4701 generic.go:334] "Generic (PLEG): container finished" podID="b77fde64-e89c-4969-9f47-a21d4df4f93d" containerID="8e10d46de5b055ab41275f0308a10e222e8de2856c88469acad3a8080fac9104" exitCode=0 Nov 21 19:38:27 crc kubenswrapper[4701]: I1121 19:38:27.918108 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kw8dh" event={"ID":"b77fde64-e89c-4969-9f47-a21d4df4f93d","Type":"ContainerDied","Data":"8e10d46de5b055ab41275f0308a10e222e8de2856c88469acad3a8080fac9104"} Nov 21 19:38:27 crc kubenswrapper[4701]: I1121 19:38:27.918761 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kw8dh" event={"ID":"b77fde64-e89c-4969-9f47-a21d4df4f93d","Type":"ContainerStarted","Data":"c5cf4fdf73edd8bc4a64fb30cf3253929f633368882834258134fa339030cc9a"} Nov 21 19:38:27 crc kubenswrapper[4701]: I1121 19:38:27.920976 4701 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 19:38:28 crc kubenswrapper[4701]: I1121 19:38:28.933403 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kw8dh" event={"ID":"b77fde64-e89c-4969-9f47-a21d4df4f93d","Type":"ContainerStarted","Data":"5cc4f62c63f68e29746c5fcc66a4240779fa99cc1566f032b3dd95d95a4e0d4d"} Nov 21 19:38:29 crc kubenswrapper[4701]: I1121 19:38:29.947916 4701 generic.go:334] "Generic (PLEG): container finished" podID="44f05c88-6707-4ca9-a248-d5abc8ae5850" containerID="dcd35b71cacdae5e918ba9631f16f2435d0d19ecd8a44ec44d41e8863e619a86" exitCode=0 Nov 21 19:38:29 crc kubenswrapper[4701]: I1121 19:38:29.947986 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5" event={"ID":"44f05c88-6707-4ca9-a248-d5abc8ae5850","Type":"ContainerDied","Data":"dcd35b71cacdae5e918ba9631f16f2435d0d19ecd8a44ec44d41e8863e619a86"} Nov 21 19:38:29 crc kubenswrapper[4701]: I1121 19:38:29.953869 4701 generic.go:334] "Generic (PLEG): container finished" podID="b77fde64-e89c-4969-9f47-a21d4df4f93d" containerID="5cc4f62c63f68e29746c5fcc66a4240779fa99cc1566f032b3dd95d95a4e0d4d" exitCode=0 Nov 21 19:38:29 crc kubenswrapper[4701]: I1121 19:38:29.974527 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-kw8dh" event={"ID":"b77fde64-e89c-4969-9f47-a21d4df4f93d","Type":"ContainerDied","Data":"5cc4f62c63f68e29746c5fcc66a4240779fa99cc1566f032b3dd95d95a4e0d4d"} Nov 21 19:38:30 crc kubenswrapper[4701]: I1121 19:38:30.969802 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kw8dh" event={"ID":"b77fde64-e89c-4969-9f47-a21d4df4f93d","Type":"ContainerStarted","Data":"5765eb9c16ad7809b72c921d7455ec26ae674ad00eacf5c5c559b28deb7c242b"} Nov 21 19:38:31 crc kubenswrapper[4701]: I1121 19:38:31.003515 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-kw8dh" podStartSLOduration=2.5703179929999997 podStartE2EDuration="5.003493227s" podCreationTimestamp="2025-11-21 19:38:26 +0000 UTC" firstStartedPulling="2025-11-21 19:38:27.920670089 +0000 UTC m=+2198.705810126" lastFinishedPulling="2025-11-21 19:38:30.353845303 +0000 UTC m=+2201.138985360" observedRunningTime="2025-11-21 19:38:30.992894197 +0000 UTC m=+2201.778034234" watchObservedRunningTime="2025-11-21 19:38:31.003493227 +0000 UTC m=+2201.788633264" Nov 21 19:38:31 crc kubenswrapper[4701]: I1121 19:38:31.490676 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5" Nov 21 19:38:31 crc kubenswrapper[4701]: I1121 19:38:31.548004 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nhrx4\" (UniqueName: \"kubernetes.io/projected/44f05c88-6707-4ca9-a248-d5abc8ae5850-kube-api-access-nhrx4\") pod \"44f05c88-6707-4ca9-a248-d5abc8ae5850\" (UID: \"44f05c88-6707-4ca9-a248-d5abc8ae5850\") " Nov 21 19:38:31 crc kubenswrapper[4701]: I1121 19:38:31.548180 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/44f05c88-6707-4ca9-a248-d5abc8ae5850-neutron-ovn-metadata-agent-neutron-config-0\") pod \"44f05c88-6707-4ca9-a248-d5abc8ae5850\" (UID: \"44f05c88-6707-4ca9-a248-d5abc8ae5850\") " Nov 21 19:38:31 crc kubenswrapper[4701]: I1121 19:38:31.548467 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44f05c88-6707-4ca9-a248-d5abc8ae5850-neutron-metadata-combined-ca-bundle\") pod \"44f05c88-6707-4ca9-a248-d5abc8ae5850\" (UID: \"44f05c88-6707-4ca9-a248-d5abc8ae5850\") " Nov 21 19:38:31 crc kubenswrapper[4701]: I1121 19:38:31.548500 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/44f05c88-6707-4ca9-a248-d5abc8ae5850-nova-metadata-neutron-config-0\") pod \"44f05c88-6707-4ca9-a248-d5abc8ae5850\" (UID: \"44f05c88-6707-4ca9-a248-d5abc8ae5850\") " Nov 21 19:38:31 crc kubenswrapper[4701]: I1121 19:38:31.548530 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/44f05c88-6707-4ca9-a248-d5abc8ae5850-ssh-key\") pod \"44f05c88-6707-4ca9-a248-d5abc8ae5850\" (UID: \"44f05c88-6707-4ca9-a248-d5abc8ae5850\") " Nov 21 19:38:31 crc kubenswrapper[4701]: I1121 19:38:31.548609 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/44f05c88-6707-4ca9-a248-d5abc8ae5850-inventory\") pod 
\"44f05c88-6707-4ca9-a248-d5abc8ae5850\" (UID: \"44f05c88-6707-4ca9-a248-d5abc8ae5850\") " Nov 21 19:38:31 crc kubenswrapper[4701]: I1121 19:38:31.560417 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44f05c88-6707-4ca9-a248-d5abc8ae5850-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "44f05c88-6707-4ca9-a248-d5abc8ae5850" (UID: "44f05c88-6707-4ca9-a248-d5abc8ae5850"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:38:31 crc kubenswrapper[4701]: I1121 19:38:31.569542 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44f05c88-6707-4ca9-a248-d5abc8ae5850-kube-api-access-nhrx4" (OuterVolumeSpecName: "kube-api-access-nhrx4") pod "44f05c88-6707-4ca9-a248-d5abc8ae5850" (UID: "44f05c88-6707-4ca9-a248-d5abc8ae5850"). InnerVolumeSpecName "kube-api-access-nhrx4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:38:31 crc kubenswrapper[4701]: I1121 19:38:31.597032 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44f05c88-6707-4ca9-a248-d5abc8ae5850-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "44f05c88-6707-4ca9-a248-d5abc8ae5850" (UID: "44f05c88-6707-4ca9-a248-d5abc8ae5850"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:38:31 crc kubenswrapper[4701]: I1121 19:38:31.603451 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44f05c88-6707-4ca9-a248-d5abc8ae5850-inventory" (OuterVolumeSpecName: "inventory") pod "44f05c88-6707-4ca9-a248-d5abc8ae5850" (UID: "44f05c88-6707-4ca9-a248-d5abc8ae5850"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:38:31 crc kubenswrapper[4701]: I1121 19:38:31.630175 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44f05c88-6707-4ca9-a248-d5abc8ae5850-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "44f05c88-6707-4ca9-a248-d5abc8ae5850" (UID: "44f05c88-6707-4ca9-a248-d5abc8ae5850"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:38:31 crc kubenswrapper[4701]: I1121 19:38:31.638577 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44f05c88-6707-4ca9-a248-d5abc8ae5850-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "44f05c88-6707-4ca9-a248-d5abc8ae5850" (UID: "44f05c88-6707-4ca9-a248-d5abc8ae5850"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:38:31 crc kubenswrapper[4701]: I1121 19:38:31.654088 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nhrx4\" (UniqueName: \"kubernetes.io/projected/44f05c88-6707-4ca9-a248-d5abc8ae5850-kube-api-access-nhrx4\") on node \"crc\" DevicePath \"\"" Nov 21 19:38:31 crc kubenswrapper[4701]: I1121 19:38:31.654151 4701 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/44f05c88-6707-4ca9-a248-d5abc8ae5850-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 21 19:38:31 crc kubenswrapper[4701]: I1121 19:38:31.654192 4701 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44f05c88-6707-4ca9-a248-d5abc8ae5850-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:38:31 crc kubenswrapper[4701]: I1121 19:38:31.654277 4701 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/44f05c88-6707-4ca9-a248-d5abc8ae5850-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 21 19:38:31 crc kubenswrapper[4701]: I1121 19:38:31.654294 4701 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/44f05c88-6707-4ca9-a248-d5abc8ae5850-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 19:38:31 crc kubenswrapper[4701]: I1121 19:38:31.654343 4701 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/44f05c88-6707-4ca9-a248-d5abc8ae5850-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 19:38:31 crc kubenswrapper[4701]: I1121 19:38:31.992374 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5" Nov 21 19:38:31 crc kubenswrapper[4701]: I1121 19:38:31.992779 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5" event={"ID":"44f05c88-6707-4ca9-a248-d5abc8ae5850","Type":"ContainerDied","Data":"e8f55411adf77dc81f996503d007743ae0740304fbbd98efecba8be80476f352"} Nov 21 19:38:31 crc kubenswrapper[4701]: I1121 19:38:31.992811 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e8f55411adf77dc81f996503d007743ae0740304fbbd98efecba8be80476f352" Nov 21 19:38:32 crc kubenswrapper[4701]: I1121 19:38:32.114333 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-fls89"] Nov 21 19:38:32 crc kubenswrapper[4701]: E1121 19:38:32.114844 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44f05c88-6707-4ca9-a248-d5abc8ae5850" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 21 19:38:32 crc kubenswrapper[4701]: I1121 19:38:32.114866 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="44f05c88-6707-4ca9-a248-d5abc8ae5850" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 21 19:38:32 crc kubenswrapper[4701]: I1121 19:38:32.115160 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="44f05c88-6707-4ca9-a248-d5abc8ae5850" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 21 19:38:32 crc kubenswrapper[4701]: I1121 19:38:32.116053 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-fls89" Nov 21 19:38:32 crc kubenswrapper[4701]: I1121 19:38:32.120032 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 19:38:32 crc kubenswrapper[4701]: I1121 19:38:32.120245 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 19:38:32 crc kubenswrapper[4701]: I1121 19:38:32.124543 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Nov 21 19:38:32 crc kubenswrapper[4701]: I1121 19:38:32.124640 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 19:38:32 crc kubenswrapper[4701]: I1121 19:38:32.126455 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-fls89"] Nov 21 19:38:32 crc kubenswrapper[4701]: I1121 19:38:32.130395 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hqsfp" Nov 21 19:38:32 crc kubenswrapper[4701]: I1121 19:38:32.172580 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-fls89\" (UID: \"17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-fls89" Nov 21 19:38:32 crc kubenswrapper[4701]: I1121 19:38:32.172775 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-fls89\" (UID: \"17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-fls89" Nov 21 19:38:32 crc kubenswrapper[4701]: I1121 19:38:32.172846 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q5lh6\" (UniqueName: \"kubernetes.io/projected/17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc-kube-api-access-q5lh6\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-fls89\" (UID: \"17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-fls89" Nov 21 19:38:32 crc kubenswrapper[4701]: I1121 19:38:32.172907 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-fls89\" (UID: \"17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-fls89" Nov 21 19:38:32 crc kubenswrapper[4701]: I1121 19:38:32.172941 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-fls89\" (UID: \"17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-fls89" Nov 21 19:38:32 crc kubenswrapper[4701]: I1121 19:38:32.274697 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"inventory\" (UniqueName: \"kubernetes.io/secret/17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-fls89\" (UID: \"17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-fls89" Nov 21 19:38:32 crc kubenswrapper[4701]: I1121 19:38:32.274761 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q5lh6\" (UniqueName: \"kubernetes.io/projected/17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc-kube-api-access-q5lh6\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-fls89\" (UID: \"17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-fls89" Nov 21 19:38:32 crc kubenswrapper[4701]: I1121 19:38:32.274815 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-fls89\" (UID: \"17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-fls89" Nov 21 19:38:32 crc kubenswrapper[4701]: I1121 19:38:32.274839 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-fls89\" (UID: \"17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-fls89" Nov 21 19:38:32 crc kubenswrapper[4701]: I1121 19:38:32.274880 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-fls89\" (UID: \"17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-fls89" Nov 21 19:38:32 crc kubenswrapper[4701]: I1121 19:38:32.279932 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-fls89\" (UID: \"17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-fls89" Nov 21 19:38:32 crc kubenswrapper[4701]: I1121 19:38:32.282159 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-fls89\" (UID: \"17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-fls89" Nov 21 19:38:32 crc kubenswrapper[4701]: I1121 19:38:32.282546 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-fls89\" (UID: \"17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-fls89" Nov 21 19:38:32 crc kubenswrapper[4701]: I1121 19:38:32.294459 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc-inventory\") pod 
\"libvirt-edpm-deployment-openstack-edpm-ipam-fls89\" (UID: \"17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-fls89" Nov 21 19:38:32 crc kubenswrapper[4701]: I1121 19:38:32.296798 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q5lh6\" (UniqueName: \"kubernetes.io/projected/17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc-kube-api-access-q5lh6\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-fls89\" (UID: \"17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-fls89" Nov 21 19:38:32 crc kubenswrapper[4701]: I1121 19:38:32.456188 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-fls89" Nov 21 19:38:33 crc kubenswrapper[4701]: I1121 19:38:33.158019 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-fls89"] Nov 21 19:38:33 crc kubenswrapper[4701]: W1121 19:38:33.167767 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod17c2df3e_2efb_4d8f_9e8a_ebecac6fb0bc.slice/crio-a27e05654c54760d23a85d1f33fc201658abc8c07a621978a2a497a961a8e9c6 WatchSource:0}: Error finding container a27e05654c54760d23a85d1f33fc201658abc8c07a621978a2a497a961a8e9c6: Status 404 returned error can't find the container with id a27e05654c54760d23a85d1f33fc201658abc8c07a621978a2a497a961a8e9c6 Nov 21 19:38:34 crc kubenswrapper[4701]: I1121 19:38:34.022528 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-fls89" event={"ID":"17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc","Type":"ContainerStarted","Data":"45c2513320cb532309452fced02b963e6286ad3cd5e154075faf644ea40c273c"} Nov 21 19:38:34 crc kubenswrapper[4701]: I1121 19:38:34.023275 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-fls89" event={"ID":"17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc","Type":"ContainerStarted","Data":"a27e05654c54760d23a85d1f33fc201658abc8c07a621978a2a497a961a8e9c6"} Nov 21 19:38:34 crc kubenswrapper[4701]: I1121 19:38:34.045727 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-fls89" podStartSLOduration=1.577551742 podStartE2EDuration="2.045705433s" podCreationTimestamp="2025-11-21 19:38:32 +0000 UTC" firstStartedPulling="2025-11-21 19:38:33.180565486 +0000 UTC m=+2203.965705533" lastFinishedPulling="2025-11-21 19:38:33.648719197 +0000 UTC m=+2204.433859224" observedRunningTime="2025-11-21 19:38:34.040420328 +0000 UTC m=+2204.825560355" watchObservedRunningTime="2025-11-21 19:38:34.045705433 +0000 UTC m=+2204.830845460" Nov 21 19:38:36 crc kubenswrapper[4701]: I1121 19:38:36.452195 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-kw8dh" Nov 21 19:38:36 crc kubenswrapper[4701]: I1121 19:38:36.452714 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-kw8dh" Nov 21 19:38:36 crc kubenswrapper[4701]: I1121 19:38:36.535430 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-kw8dh" Nov 21 19:38:37 crc kubenswrapper[4701]: I1121 19:38:37.138529 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openshift-marketplace/redhat-marketplace-kw8dh" Nov 21 19:38:37 crc kubenswrapper[4701]: I1121 19:38:37.219243 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-kw8dh"] Nov 21 19:38:39 crc kubenswrapper[4701]: I1121 19:38:39.088667 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-kw8dh" podUID="b77fde64-e89c-4969-9f47-a21d4df4f93d" containerName="registry-server" containerID="cri-o://5765eb9c16ad7809b72c921d7455ec26ae674ad00eacf5c5c559b28deb7c242b" gracePeriod=2 Nov 21 19:38:39 crc kubenswrapper[4701]: I1121 19:38:39.672559 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kw8dh" Nov 21 19:38:39 crc kubenswrapper[4701]: I1121 19:38:39.870732 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b77fde64-e89c-4969-9f47-a21d4df4f93d-catalog-content\") pod \"b77fde64-e89c-4969-9f47-a21d4df4f93d\" (UID: \"b77fde64-e89c-4969-9f47-a21d4df4f93d\") " Nov 21 19:38:39 crc kubenswrapper[4701]: I1121 19:38:39.870795 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mdzwr\" (UniqueName: \"kubernetes.io/projected/b77fde64-e89c-4969-9f47-a21d4df4f93d-kube-api-access-mdzwr\") pod \"b77fde64-e89c-4969-9f47-a21d4df4f93d\" (UID: \"b77fde64-e89c-4969-9f47-a21d4df4f93d\") " Nov 21 19:38:39 crc kubenswrapper[4701]: I1121 19:38:39.870843 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b77fde64-e89c-4969-9f47-a21d4df4f93d-utilities\") pod \"b77fde64-e89c-4969-9f47-a21d4df4f93d\" (UID: \"b77fde64-e89c-4969-9f47-a21d4df4f93d\") " Nov 21 19:38:39 crc kubenswrapper[4701]: I1121 19:38:39.872925 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b77fde64-e89c-4969-9f47-a21d4df4f93d-utilities" (OuterVolumeSpecName: "utilities") pod "b77fde64-e89c-4969-9f47-a21d4df4f93d" (UID: "b77fde64-e89c-4969-9f47-a21d4df4f93d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:38:39 crc kubenswrapper[4701]: I1121 19:38:39.887431 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b77fde64-e89c-4969-9f47-a21d4df4f93d-kube-api-access-mdzwr" (OuterVolumeSpecName: "kube-api-access-mdzwr") pod "b77fde64-e89c-4969-9f47-a21d4df4f93d" (UID: "b77fde64-e89c-4969-9f47-a21d4df4f93d"). InnerVolumeSpecName "kube-api-access-mdzwr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:38:39 crc kubenswrapper[4701]: I1121 19:38:39.916257 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b77fde64-e89c-4969-9f47-a21d4df4f93d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b77fde64-e89c-4969-9f47-a21d4df4f93d" (UID: "b77fde64-e89c-4969-9f47-a21d4df4f93d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:38:39 crc kubenswrapper[4701]: I1121 19:38:39.974701 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b77fde64-e89c-4969-9f47-a21d4df4f93d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 19:38:39 crc kubenswrapper[4701]: I1121 19:38:39.974758 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mdzwr\" (UniqueName: \"kubernetes.io/projected/b77fde64-e89c-4969-9f47-a21d4df4f93d-kube-api-access-mdzwr\") on node \"crc\" DevicePath \"\"" Nov 21 19:38:39 crc kubenswrapper[4701]: I1121 19:38:39.974778 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b77fde64-e89c-4969-9f47-a21d4df4f93d-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 19:38:40 crc kubenswrapper[4701]: I1121 19:38:40.104091 4701 generic.go:334] "Generic (PLEG): container finished" podID="b77fde64-e89c-4969-9f47-a21d4df4f93d" containerID="5765eb9c16ad7809b72c921d7455ec26ae674ad00eacf5c5c559b28deb7c242b" exitCode=0 Nov 21 19:38:40 crc kubenswrapper[4701]: I1121 19:38:40.104141 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kw8dh" event={"ID":"b77fde64-e89c-4969-9f47-a21d4df4f93d","Type":"ContainerDied","Data":"5765eb9c16ad7809b72c921d7455ec26ae674ad00eacf5c5c559b28deb7c242b"} Nov 21 19:38:40 crc kubenswrapper[4701]: I1121 19:38:40.104175 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kw8dh" event={"ID":"b77fde64-e89c-4969-9f47-a21d4df4f93d","Type":"ContainerDied","Data":"c5cf4fdf73edd8bc4a64fb30cf3253929f633368882834258134fa339030cc9a"} Nov 21 19:38:40 crc kubenswrapper[4701]: I1121 19:38:40.104194 4701 scope.go:117] "RemoveContainer" containerID="5765eb9c16ad7809b72c921d7455ec26ae674ad00eacf5c5c559b28deb7c242b" Nov 21 19:38:40 crc kubenswrapper[4701]: I1121 19:38:40.105747 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kw8dh" Nov 21 19:38:40 crc kubenswrapper[4701]: I1121 19:38:40.136978 4701 scope.go:117] "RemoveContainer" containerID="5cc4f62c63f68e29746c5fcc66a4240779fa99cc1566f032b3dd95d95a4e0d4d" Nov 21 19:38:40 crc kubenswrapper[4701]: I1121 19:38:40.137176 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-kw8dh"] Nov 21 19:38:40 crc kubenswrapper[4701]: I1121 19:38:40.147240 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-kw8dh"] Nov 21 19:38:40 crc kubenswrapper[4701]: I1121 19:38:40.169877 4701 scope.go:117] "RemoveContainer" containerID="8e10d46de5b055ab41275f0308a10e222e8de2856c88469acad3a8080fac9104" Nov 21 19:38:40 crc kubenswrapper[4701]: I1121 19:38:40.218921 4701 scope.go:117] "RemoveContainer" containerID="5765eb9c16ad7809b72c921d7455ec26ae674ad00eacf5c5c559b28deb7c242b" Nov 21 19:38:40 crc kubenswrapper[4701]: E1121 19:38:40.219609 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5765eb9c16ad7809b72c921d7455ec26ae674ad00eacf5c5c559b28deb7c242b\": container with ID starting with 5765eb9c16ad7809b72c921d7455ec26ae674ad00eacf5c5c559b28deb7c242b not found: ID does not exist" containerID="5765eb9c16ad7809b72c921d7455ec26ae674ad00eacf5c5c559b28deb7c242b" Nov 21 19:38:40 crc kubenswrapper[4701]: I1121 19:38:40.219664 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5765eb9c16ad7809b72c921d7455ec26ae674ad00eacf5c5c559b28deb7c242b"} err="failed to get container status \"5765eb9c16ad7809b72c921d7455ec26ae674ad00eacf5c5c559b28deb7c242b\": rpc error: code = NotFound desc = could not find container \"5765eb9c16ad7809b72c921d7455ec26ae674ad00eacf5c5c559b28deb7c242b\": container with ID starting with 5765eb9c16ad7809b72c921d7455ec26ae674ad00eacf5c5c559b28deb7c242b not found: ID does not exist" Nov 21 19:38:40 crc kubenswrapper[4701]: I1121 19:38:40.219703 4701 scope.go:117] "RemoveContainer" containerID="5cc4f62c63f68e29746c5fcc66a4240779fa99cc1566f032b3dd95d95a4e0d4d" Nov 21 19:38:40 crc kubenswrapper[4701]: E1121 19:38:40.220113 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5cc4f62c63f68e29746c5fcc66a4240779fa99cc1566f032b3dd95d95a4e0d4d\": container with ID starting with 5cc4f62c63f68e29746c5fcc66a4240779fa99cc1566f032b3dd95d95a4e0d4d not found: ID does not exist" containerID="5cc4f62c63f68e29746c5fcc66a4240779fa99cc1566f032b3dd95d95a4e0d4d" Nov 21 19:38:40 crc kubenswrapper[4701]: I1121 19:38:40.220175 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5cc4f62c63f68e29746c5fcc66a4240779fa99cc1566f032b3dd95d95a4e0d4d"} err="failed to get container status \"5cc4f62c63f68e29746c5fcc66a4240779fa99cc1566f032b3dd95d95a4e0d4d\": rpc error: code = NotFound desc = could not find container \"5cc4f62c63f68e29746c5fcc66a4240779fa99cc1566f032b3dd95d95a4e0d4d\": container with ID starting with 5cc4f62c63f68e29746c5fcc66a4240779fa99cc1566f032b3dd95d95a4e0d4d not found: ID does not exist" Nov 21 19:38:40 crc kubenswrapper[4701]: I1121 19:38:40.220238 4701 scope.go:117] "RemoveContainer" containerID="8e10d46de5b055ab41275f0308a10e222e8de2856c88469acad3a8080fac9104" Nov 21 19:38:40 crc kubenswrapper[4701]: E1121 19:38:40.220938 4701 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"8e10d46de5b055ab41275f0308a10e222e8de2856c88469acad3a8080fac9104\": container with ID starting with 8e10d46de5b055ab41275f0308a10e222e8de2856c88469acad3a8080fac9104 not found: ID does not exist" containerID="8e10d46de5b055ab41275f0308a10e222e8de2856c88469acad3a8080fac9104" Nov 21 19:38:40 crc kubenswrapper[4701]: I1121 19:38:40.220971 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e10d46de5b055ab41275f0308a10e222e8de2856c88469acad3a8080fac9104"} err="failed to get container status \"8e10d46de5b055ab41275f0308a10e222e8de2856c88469acad3a8080fac9104\": rpc error: code = NotFound desc = could not find container \"8e10d46de5b055ab41275f0308a10e222e8de2856c88469acad3a8080fac9104\": container with ID starting with 8e10d46de5b055ab41275f0308a10e222e8de2856c88469acad3a8080fac9104 not found: ID does not exist" Nov 21 19:38:41 crc kubenswrapper[4701]: I1121 19:38:41.968602 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b77fde64-e89c-4969-9f47-a21d4df4f93d" path="/var/lib/kubelet/pods/b77fde64-e89c-4969-9f47-a21d4df4f93d/volumes" Nov 21 19:38:48 crc kubenswrapper[4701]: I1121 19:38:48.614040 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 19:38:48 crc kubenswrapper[4701]: I1121 19:38:48.614442 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 19:39:18 crc kubenswrapper[4701]: I1121 19:39:18.614018 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 19:39:18 crc kubenswrapper[4701]: I1121 19:39:18.615089 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 19:39:48 crc kubenswrapper[4701]: I1121 19:39:48.613764 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 19:39:48 crc kubenswrapper[4701]: I1121 19:39:48.614484 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 19:39:48 crc kubenswrapper[4701]: I1121 19:39:48.614541 4701 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" 
status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" Nov 21 19:39:48 crc kubenswrapper[4701]: I1121 19:39:48.615491 4701 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9293b5a2479f8a37b25cfffe8c63f12e41d202a81f7c28415703d6e5f527b560"} pod="openshift-machine-config-operator/machine-config-daemon-tbszf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 19:39:48 crc kubenswrapper[4701]: I1121 19:39:48.615560 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" containerID="cri-o://9293b5a2479f8a37b25cfffe8c63f12e41d202a81f7c28415703d6e5f527b560" gracePeriod=600 Nov 21 19:39:49 crc kubenswrapper[4701]: I1121 19:39:49.063097 4701 generic.go:334] "Generic (PLEG): container finished" podID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerID="9293b5a2479f8a37b25cfffe8c63f12e41d202a81f7c28415703d6e5f527b560" exitCode=0 Nov 21 19:39:49 crc kubenswrapper[4701]: I1121 19:39:49.063194 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerDied","Data":"9293b5a2479f8a37b25cfffe8c63f12e41d202a81f7c28415703d6e5f527b560"} Nov 21 19:39:49 crc kubenswrapper[4701]: I1121 19:39:49.063330 4701 scope.go:117] "RemoveContainer" containerID="7fbc56daee349295117a0bd71dcbc8f65f0cba0ed64444e6d442311c3039877d" Nov 21 19:39:49 crc kubenswrapper[4701]: E1121 19:39:49.265668 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:39:50 crc kubenswrapper[4701]: I1121 19:39:50.076254 4701 scope.go:117] "RemoveContainer" containerID="9293b5a2479f8a37b25cfffe8c63f12e41d202a81f7c28415703d6e5f527b560" Nov 21 19:39:50 crc kubenswrapper[4701]: E1121 19:39:50.076627 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:40:03 crc kubenswrapper[4701]: I1121 19:40:03.951619 4701 scope.go:117] "RemoveContainer" containerID="9293b5a2479f8a37b25cfffe8c63f12e41d202a81f7c28415703d6e5f527b560" Nov 21 19:40:03 crc kubenswrapper[4701]: E1121 19:40:03.953485 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:40:14 crc 
kubenswrapper[4701]: I1121 19:40:14.952000 4701 scope.go:117] "RemoveContainer" containerID="9293b5a2479f8a37b25cfffe8c63f12e41d202a81f7c28415703d6e5f527b560" Nov 21 19:40:14 crc kubenswrapper[4701]: E1121 19:40:14.952992 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:40:27 crc kubenswrapper[4701]: I1121 19:40:27.951341 4701 scope.go:117] "RemoveContainer" containerID="9293b5a2479f8a37b25cfffe8c63f12e41d202a81f7c28415703d6e5f527b560" Nov 21 19:40:27 crc kubenswrapper[4701]: E1121 19:40:27.952972 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:40:38 crc kubenswrapper[4701]: I1121 19:40:38.952072 4701 scope.go:117] "RemoveContainer" containerID="9293b5a2479f8a37b25cfffe8c63f12e41d202a81f7c28415703d6e5f527b560" Nov 21 19:40:38 crc kubenswrapper[4701]: E1121 19:40:38.953848 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:40:40 crc kubenswrapper[4701]: I1121 19:40:40.529570 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-hr47h"] Nov 21 19:40:40 crc kubenswrapper[4701]: E1121 19:40:40.531423 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b77fde64-e89c-4969-9f47-a21d4df4f93d" containerName="extract-utilities" Nov 21 19:40:40 crc kubenswrapper[4701]: I1121 19:40:40.531458 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="b77fde64-e89c-4969-9f47-a21d4df4f93d" containerName="extract-utilities" Nov 21 19:40:40 crc kubenswrapper[4701]: E1121 19:40:40.531514 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b77fde64-e89c-4969-9f47-a21d4df4f93d" containerName="extract-content" Nov 21 19:40:40 crc kubenswrapper[4701]: I1121 19:40:40.531534 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="b77fde64-e89c-4969-9f47-a21d4df4f93d" containerName="extract-content" Nov 21 19:40:40 crc kubenswrapper[4701]: E1121 19:40:40.531605 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b77fde64-e89c-4969-9f47-a21d4df4f93d" containerName="registry-server" Nov 21 19:40:40 crc kubenswrapper[4701]: I1121 19:40:40.531623 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="b77fde64-e89c-4969-9f47-a21d4df4f93d" containerName="registry-server" Nov 21 19:40:40 crc kubenswrapper[4701]: I1121 19:40:40.532055 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="b77fde64-e89c-4969-9f47-a21d4df4f93d" 
containerName="registry-server" Nov 21 19:40:40 crc kubenswrapper[4701]: I1121 19:40:40.536270 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hr47h" Nov 21 19:40:40 crc kubenswrapper[4701]: I1121 19:40:40.544473 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hr47h"] Nov 21 19:40:40 crc kubenswrapper[4701]: I1121 19:40:40.713506 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e03c4bbd-d174-4fed-b621-46b44b5fe502-utilities\") pod \"community-operators-hr47h\" (UID: \"e03c4bbd-d174-4fed-b621-46b44b5fe502\") " pod="openshift-marketplace/community-operators-hr47h" Nov 21 19:40:40 crc kubenswrapper[4701]: I1121 19:40:40.714046 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzbvm\" (UniqueName: \"kubernetes.io/projected/e03c4bbd-d174-4fed-b621-46b44b5fe502-kube-api-access-rzbvm\") pod \"community-operators-hr47h\" (UID: \"e03c4bbd-d174-4fed-b621-46b44b5fe502\") " pod="openshift-marketplace/community-operators-hr47h" Nov 21 19:40:40 crc kubenswrapper[4701]: I1121 19:40:40.714357 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e03c4bbd-d174-4fed-b621-46b44b5fe502-catalog-content\") pod \"community-operators-hr47h\" (UID: \"e03c4bbd-d174-4fed-b621-46b44b5fe502\") " pod="openshift-marketplace/community-operators-hr47h" Nov 21 19:40:40 crc kubenswrapper[4701]: I1121 19:40:40.818503 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e03c4bbd-d174-4fed-b621-46b44b5fe502-utilities\") pod \"community-operators-hr47h\" (UID: \"e03c4bbd-d174-4fed-b621-46b44b5fe502\") " pod="openshift-marketplace/community-operators-hr47h" Nov 21 19:40:40 crc kubenswrapper[4701]: I1121 19:40:40.818605 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzbvm\" (UniqueName: \"kubernetes.io/projected/e03c4bbd-d174-4fed-b621-46b44b5fe502-kube-api-access-rzbvm\") pod \"community-operators-hr47h\" (UID: \"e03c4bbd-d174-4fed-b621-46b44b5fe502\") " pod="openshift-marketplace/community-operators-hr47h" Nov 21 19:40:40 crc kubenswrapper[4701]: I1121 19:40:40.818662 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e03c4bbd-d174-4fed-b621-46b44b5fe502-catalog-content\") pod \"community-operators-hr47h\" (UID: \"e03c4bbd-d174-4fed-b621-46b44b5fe502\") " pod="openshift-marketplace/community-operators-hr47h" Nov 21 19:40:40 crc kubenswrapper[4701]: I1121 19:40:40.819422 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e03c4bbd-d174-4fed-b621-46b44b5fe502-utilities\") pod \"community-operators-hr47h\" (UID: \"e03c4bbd-d174-4fed-b621-46b44b5fe502\") " pod="openshift-marketplace/community-operators-hr47h" Nov 21 19:40:40 crc kubenswrapper[4701]: I1121 19:40:40.820399 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e03c4bbd-d174-4fed-b621-46b44b5fe502-catalog-content\") pod \"community-operators-hr47h\" (UID: \"e03c4bbd-d174-4fed-b621-46b44b5fe502\") " 
pod="openshift-marketplace/community-operators-hr47h" Nov 21 19:40:40 crc kubenswrapper[4701]: I1121 19:40:40.847964 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzbvm\" (UniqueName: \"kubernetes.io/projected/e03c4bbd-d174-4fed-b621-46b44b5fe502-kube-api-access-rzbvm\") pod \"community-operators-hr47h\" (UID: \"e03c4bbd-d174-4fed-b621-46b44b5fe502\") " pod="openshift-marketplace/community-operators-hr47h" Nov 21 19:40:40 crc kubenswrapper[4701]: I1121 19:40:40.878233 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hr47h" Nov 21 19:40:41 crc kubenswrapper[4701]: I1121 19:40:41.333335 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hr47h"] Nov 21 19:40:41 crc kubenswrapper[4701]: I1121 19:40:41.780072 4701 generic.go:334] "Generic (PLEG): container finished" podID="e03c4bbd-d174-4fed-b621-46b44b5fe502" containerID="0773be9f594c98e721af8b17f87d7260434b4ddec7b13ed933f0820bee748eef" exitCode=0 Nov 21 19:40:41 crc kubenswrapper[4701]: I1121 19:40:41.780475 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hr47h" event={"ID":"e03c4bbd-d174-4fed-b621-46b44b5fe502","Type":"ContainerDied","Data":"0773be9f594c98e721af8b17f87d7260434b4ddec7b13ed933f0820bee748eef"} Nov 21 19:40:41 crc kubenswrapper[4701]: I1121 19:40:41.781567 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hr47h" event={"ID":"e03c4bbd-d174-4fed-b621-46b44b5fe502","Type":"ContainerStarted","Data":"52d78caf80783783442f7e962acd983c51d69d6592c92688faca2c4639a01e3a"} Nov 21 19:40:42 crc kubenswrapper[4701]: I1121 19:40:42.796559 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hr47h" event={"ID":"e03c4bbd-d174-4fed-b621-46b44b5fe502","Type":"ContainerStarted","Data":"e5d00552790230b54d5b11f51dba8ce787e3ef98ce281437647df0fcbec9be60"} Nov 21 19:40:43 crc kubenswrapper[4701]: I1121 19:40:43.810531 4701 generic.go:334] "Generic (PLEG): container finished" podID="e03c4bbd-d174-4fed-b621-46b44b5fe502" containerID="e5d00552790230b54d5b11f51dba8ce787e3ef98ce281437647df0fcbec9be60" exitCode=0 Nov 21 19:40:43 crc kubenswrapper[4701]: I1121 19:40:43.810790 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hr47h" event={"ID":"e03c4bbd-d174-4fed-b621-46b44b5fe502","Type":"ContainerDied","Data":"e5d00552790230b54d5b11f51dba8ce787e3ef98ce281437647df0fcbec9be60"} Nov 21 19:40:44 crc kubenswrapper[4701]: I1121 19:40:44.827363 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hr47h" event={"ID":"e03c4bbd-d174-4fed-b621-46b44b5fe502","Type":"ContainerStarted","Data":"7a3efc4fd81aa0a9c6eb8e7569595917fa74a8d9c56eecc881330a13d107c9be"} Nov 21 19:40:44 crc kubenswrapper[4701]: I1121 19:40:44.866412 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-hr47h" podStartSLOduration=2.371485888 podStartE2EDuration="4.866389554s" podCreationTimestamp="2025-11-21 19:40:40 +0000 UTC" firstStartedPulling="2025-11-21 19:40:41.782951621 +0000 UTC m=+2332.568091648" lastFinishedPulling="2025-11-21 19:40:44.277855277 +0000 UTC m=+2335.062995314" observedRunningTime="2025-11-21 19:40:44.857572494 +0000 UTC m=+2335.642712531" watchObservedRunningTime="2025-11-21 19:40:44.866389554 
+0000 UTC m=+2335.651529591" Nov 21 19:40:50 crc kubenswrapper[4701]: I1121 19:40:50.879368 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-hr47h" Nov 21 19:40:50 crc kubenswrapper[4701]: I1121 19:40:50.879817 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-hr47h" Nov 21 19:40:50 crc kubenswrapper[4701]: I1121 19:40:50.971785 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-hr47h" Nov 21 19:40:51 crc kubenswrapper[4701]: I1121 19:40:51.050866 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-hr47h" Nov 21 19:40:51 crc kubenswrapper[4701]: I1121 19:40:51.236100 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-hr47h"] Nov 21 19:40:51 crc kubenswrapper[4701]: I1121 19:40:51.950984 4701 scope.go:117] "RemoveContainer" containerID="9293b5a2479f8a37b25cfffe8c63f12e41d202a81f7c28415703d6e5f527b560" Nov 21 19:40:51 crc kubenswrapper[4701]: E1121 19:40:51.951822 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:40:52 crc kubenswrapper[4701]: I1121 19:40:52.946231 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-hr47h" podUID="e03c4bbd-d174-4fed-b621-46b44b5fe502" containerName="registry-server" containerID="cri-o://7a3efc4fd81aa0a9c6eb8e7569595917fa74a8d9c56eecc881330a13d107c9be" gracePeriod=2 Nov 21 19:40:53 crc kubenswrapper[4701]: I1121 19:40:53.973308 4701 generic.go:334] "Generic (PLEG): container finished" podID="e03c4bbd-d174-4fed-b621-46b44b5fe502" containerID="7a3efc4fd81aa0a9c6eb8e7569595917fa74a8d9c56eecc881330a13d107c9be" exitCode=0 Nov 21 19:40:53 crc kubenswrapper[4701]: I1121 19:40:53.976109 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hr47h" event={"ID":"e03c4bbd-d174-4fed-b621-46b44b5fe502","Type":"ContainerDied","Data":"7a3efc4fd81aa0a9c6eb8e7569595917fa74a8d9c56eecc881330a13d107c9be"} Nov 21 19:40:54 crc kubenswrapper[4701]: I1121 19:40:54.151795 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-hr47h" Nov 21 19:40:54 crc kubenswrapper[4701]: I1121 19:40:54.291002 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e03c4bbd-d174-4fed-b621-46b44b5fe502-catalog-content\") pod \"e03c4bbd-d174-4fed-b621-46b44b5fe502\" (UID: \"e03c4bbd-d174-4fed-b621-46b44b5fe502\") " Nov 21 19:40:54 crc kubenswrapper[4701]: I1121 19:40:54.292227 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rzbvm\" (UniqueName: \"kubernetes.io/projected/e03c4bbd-d174-4fed-b621-46b44b5fe502-kube-api-access-rzbvm\") pod \"e03c4bbd-d174-4fed-b621-46b44b5fe502\" (UID: \"e03c4bbd-d174-4fed-b621-46b44b5fe502\") " Nov 21 19:40:54 crc kubenswrapper[4701]: I1121 19:40:54.292568 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e03c4bbd-d174-4fed-b621-46b44b5fe502-utilities\") pod \"e03c4bbd-d174-4fed-b621-46b44b5fe502\" (UID: \"e03c4bbd-d174-4fed-b621-46b44b5fe502\") " Nov 21 19:40:54 crc kubenswrapper[4701]: I1121 19:40:54.294275 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e03c4bbd-d174-4fed-b621-46b44b5fe502-utilities" (OuterVolumeSpecName: "utilities") pod "e03c4bbd-d174-4fed-b621-46b44b5fe502" (UID: "e03c4bbd-d174-4fed-b621-46b44b5fe502"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:40:54 crc kubenswrapper[4701]: I1121 19:40:54.309340 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e03c4bbd-d174-4fed-b621-46b44b5fe502-kube-api-access-rzbvm" (OuterVolumeSpecName: "kube-api-access-rzbvm") pod "e03c4bbd-d174-4fed-b621-46b44b5fe502" (UID: "e03c4bbd-d174-4fed-b621-46b44b5fe502"). InnerVolumeSpecName "kube-api-access-rzbvm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:40:54 crc kubenswrapper[4701]: I1121 19:40:54.353980 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e03c4bbd-d174-4fed-b621-46b44b5fe502-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e03c4bbd-d174-4fed-b621-46b44b5fe502" (UID: "e03c4bbd-d174-4fed-b621-46b44b5fe502"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:40:54 crc kubenswrapper[4701]: I1121 19:40:54.396855 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e03c4bbd-d174-4fed-b621-46b44b5fe502-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 19:40:54 crc kubenswrapper[4701]: I1121 19:40:54.396929 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rzbvm\" (UniqueName: \"kubernetes.io/projected/e03c4bbd-d174-4fed-b621-46b44b5fe502-kube-api-access-rzbvm\") on node \"crc\" DevicePath \"\"" Nov 21 19:40:54 crc kubenswrapper[4701]: I1121 19:40:54.396960 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e03c4bbd-d174-4fed-b621-46b44b5fe502-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 19:40:54 crc kubenswrapper[4701]: I1121 19:40:54.988174 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hr47h" event={"ID":"e03c4bbd-d174-4fed-b621-46b44b5fe502","Type":"ContainerDied","Data":"52d78caf80783783442f7e962acd983c51d69d6592c92688faca2c4639a01e3a"} Nov 21 19:40:54 crc kubenswrapper[4701]: I1121 19:40:54.988273 4701 scope.go:117] "RemoveContainer" containerID="7a3efc4fd81aa0a9c6eb8e7569595917fa74a8d9c56eecc881330a13d107c9be" Nov 21 19:40:54 crc kubenswrapper[4701]: I1121 19:40:54.988429 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hr47h" Nov 21 19:40:55 crc kubenswrapper[4701]: I1121 19:40:55.012683 4701 scope.go:117] "RemoveContainer" containerID="e5d00552790230b54d5b11f51dba8ce787e3ef98ce281437647df0fcbec9be60" Nov 21 19:40:55 crc kubenswrapper[4701]: I1121 19:40:55.040409 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-hr47h"] Nov 21 19:40:55 crc kubenswrapper[4701]: I1121 19:40:55.054501 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-hr47h"] Nov 21 19:40:55 crc kubenswrapper[4701]: I1121 19:40:55.067130 4701 scope.go:117] "RemoveContainer" containerID="0773be9f594c98e721af8b17f87d7260434b4ddec7b13ed933f0820bee748eef" Nov 21 19:40:55 crc kubenswrapper[4701]: I1121 19:40:55.975351 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e03c4bbd-d174-4fed-b621-46b44b5fe502" path="/var/lib/kubelet/pods/e03c4bbd-d174-4fed-b621-46b44b5fe502/volumes" Nov 21 19:41:05 crc kubenswrapper[4701]: I1121 19:41:05.951154 4701 scope.go:117] "RemoveContainer" containerID="9293b5a2479f8a37b25cfffe8c63f12e41d202a81f7c28415703d6e5f527b560" Nov 21 19:41:05 crc kubenswrapper[4701]: E1121 19:41:05.952133 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:41:18 crc kubenswrapper[4701]: I1121 19:41:18.951392 4701 scope.go:117] "RemoveContainer" containerID="9293b5a2479f8a37b25cfffe8c63f12e41d202a81f7c28415703d6e5f527b560" Nov 21 19:41:18 crc kubenswrapper[4701]: E1121 19:41:18.952925 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" 
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:41:29 crc kubenswrapper[4701]: I1121 19:41:29.969267 4701 scope.go:117] "RemoveContainer" containerID="9293b5a2479f8a37b25cfffe8c63f12e41d202a81f7c28415703d6e5f527b560" Nov 21 19:41:29 crc kubenswrapper[4701]: E1121 19:41:29.970595 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:41:44 crc kubenswrapper[4701]: I1121 19:41:44.952637 4701 scope.go:117] "RemoveContainer" containerID="9293b5a2479f8a37b25cfffe8c63f12e41d202a81f7c28415703d6e5f527b560" Nov 21 19:41:44 crc kubenswrapper[4701]: E1121 19:41:44.954001 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:41:59 crc kubenswrapper[4701]: I1121 19:41:59.966924 4701 scope.go:117] "RemoveContainer" containerID="9293b5a2479f8a37b25cfffe8c63f12e41d202a81f7c28415703d6e5f527b560" Nov 21 19:41:59 crc kubenswrapper[4701]: E1121 19:41:59.967799 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:42:14 crc kubenswrapper[4701]: I1121 19:42:14.952575 4701 scope.go:117] "RemoveContainer" containerID="9293b5a2479f8a37b25cfffe8c63f12e41d202a81f7c28415703d6e5f527b560" Nov 21 19:42:14 crc kubenswrapper[4701]: E1121 19:42:14.953768 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:42:28 crc kubenswrapper[4701]: I1121 19:42:28.952125 4701 scope.go:117] "RemoveContainer" containerID="9293b5a2479f8a37b25cfffe8c63f12e41d202a81f7c28415703d6e5f527b560" Nov 21 19:42:28 crc kubenswrapper[4701]: E1121 19:42:28.955441 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:42:40 crc kubenswrapper[4701]: I1121 19:42:40.951737 4701 scope.go:117] "RemoveContainer" containerID="9293b5a2479f8a37b25cfffe8c63f12e41d202a81f7c28415703d6e5f527b560" Nov 21 19:42:40 crc kubenswrapper[4701]: E1121 19:42:40.952828 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:42:51 crc kubenswrapper[4701]: I1121 19:42:51.952014 4701 scope.go:117] "RemoveContainer" containerID="9293b5a2479f8a37b25cfffe8c63f12e41d202a81f7c28415703d6e5f527b560" Nov 21 19:42:51 crc kubenswrapper[4701]: E1121 19:42:51.953669 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:43:05 crc kubenswrapper[4701]: I1121 19:43:05.952035 4701 scope.go:117] "RemoveContainer" containerID="9293b5a2479f8a37b25cfffe8c63f12e41d202a81f7c28415703d6e5f527b560" Nov 21 19:43:05 crc kubenswrapper[4701]: E1121 19:43:05.953979 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:43:19 crc kubenswrapper[4701]: I1121 19:43:19.968821 4701 scope.go:117] "RemoveContainer" containerID="9293b5a2479f8a37b25cfffe8c63f12e41d202a81f7c28415703d6e5f527b560" Nov 21 19:43:19 crc kubenswrapper[4701]: E1121 19:43:19.970421 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:43:30 crc kubenswrapper[4701]: I1121 19:43:30.953281 4701 scope.go:117] "RemoveContainer" containerID="9293b5a2479f8a37b25cfffe8c63f12e41d202a81f7c28415703d6e5f527b560" Nov 21 19:43:30 crc kubenswrapper[4701]: E1121 19:43:30.954106 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" 
podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:43:31 crc kubenswrapper[4701]: I1121 19:43:31.348904 4701 generic.go:334] "Generic (PLEG): container finished" podID="17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc" containerID="45c2513320cb532309452fced02b963e6286ad3cd5e154075faf644ea40c273c" exitCode=0 Nov 21 19:43:31 crc kubenswrapper[4701]: I1121 19:43:31.348950 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-fls89" event={"ID":"17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc","Type":"ContainerDied","Data":"45c2513320cb532309452fced02b963e6286ad3cd5e154075faf644ea40c273c"} Nov 21 19:43:32 crc kubenswrapper[4701]: I1121 19:43:32.972153 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-fls89" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.073931 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc-inventory\") pod \"17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc\" (UID: \"17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc\") " Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.074263 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc-ssh-key\") pod \"17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc\" (UID: \"17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc\") " Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.074314 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc-libvirt-secret-0\") pod \"17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc\" (UID: \"17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc\") " Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.074387 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q5lh6\" (UniqueName: \"kubernetes.io/projected/17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc-kube-api-access-q5lh6\") pod \"17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc\" (UID: \"17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc\") " Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.074522 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc-libvirt-combined-ca-bundle\") pod \"17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc\" (UID: \"17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc\") " Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.082697 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc-kube-api-access-q5lh6" (OuterVolumeSpecName: "kube-api-access-q5lh6") pod "17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc" (UID: "17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc"). InnerVolumeSpecName "kube-api-access-q5lh6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.083509 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc" (UID: "17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc"). InnerVolumeSpecName "libvirt-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.108756 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc" (UID: "17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.110458 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc-inventory" (OuterVolumeSpecName: "inventory") pod "17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc" (UID: "17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.118613 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc" (UID: "17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.177618 4701 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.177674 4701 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.177690 4701 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.177703 4701 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.177716 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q5lh6\" (UniqueName: \"kubernetes.io/projected/17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc-kube-api-access-q5lh6\") on node \"crc\" DevicePath \"\"" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.376640 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-fls89" event={"ID":"17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc","Type":"ContainerDied","Data":"a27e05654c54760d23a85d1f33fc201658abc8c07a621978a2a497a961a8e9c6"} Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.376704 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a27e05654c54760d23a85d1f33fc201658abc8c07a621978a2a497a961a8e9c6" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.376796 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-fls89" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.560004 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh"] Nov 21 19:43:33 crc kubenswrapper[4701]: E1121 19:43:33.561057 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e03c4bbd-d174-4fed-b621-46b44b5fe502" containerName="extract-utilities" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.561086 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="e03c4bbd-d174-4fed-b621-46b44b5fe502" containerName="extract-utilities" Nov 21 19:43:33 crc kubenswrapper[4701]: E1121 19:43:33.561131 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e03c4bbd-d174-4fed-b621-46b44b5fe502" containerName="extract-content" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.561140 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="e03c4bbd-d174-4fed-b621-46b44b5fe502" containerName="extract-content" Nov 21 19:43:33 crc kubenswrapper[4701]: E1121 19:43:33.561179 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e03c4bbd-d174-4fed-b621-46b44b5fe502" containerName="registry-server" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.561186 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="e03c4bbd-d174-4fed-b621-46b44b5fe502" containerName="registry-server" Nov 21 19:43:33 crc kubenswrapper[4701]: E1121 19:43:33.561214 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.561225 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.561622 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="e03c4bbd-d174-4fed-b621-46b44b5fe502" containerName="registry-server" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.561645 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.562968 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.584091 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.586868 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.587265 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hqsfp" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.587378 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.587992 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.588094 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.588233 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.603342 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh"] Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.700461 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhwkh\" (UID: \"a50527e7-3b38-471d-a03d-937e88e019f3\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.700637 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j6z9z\" (UniqueName: \"kubernetes.io/projected/a50527e7-3b38-471d-a03d-937e88e019f3-kube-api-access-j6z9z\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhwkh\" (UID: \"a50527e7-3b38-471d-a03d-937e88e019f3\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.700701 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhwkh\" (UID: \"a50527e7-3b38-471d-a03d-937e88e019f3\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.700756 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhwkh\" (UID: \"a50527e7-3b38-471d-a03d-937e88e019f3\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.700971 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: 
\"kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhwkh\" (UID: \"a50527e7-3b38-471d-a03d-937e88e019f3\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.701091 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhwkh\" (UID: \"a50527e7-3b38-471d-a03d-937e88e019f3\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.701475 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/a50527e7-3b38-471d-a03d-937e88e019f3-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhwkh\" (UID: \"a50527e7-3b38-471d-a03d-937e88e019f3\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.701589 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhwkh\" (UID: \"a50527e7-3b38-471d-a03d-937e88e019f3\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.701694 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhwkh\" (UID: \"a50527e7-3b38-471d-a03d-937e88e019f3\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.804178 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhwkh\" (UID: \"a50527e7-3b38-471d-a03d-937e88e019f3\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.804385 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/a50527e7-3b38-471d-a03d-937e88e019f3-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhwkh\" (UID: \"a50527e7-3b38-471d-a03d-937e88e019f3\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.804452 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhwkh\" (UID: \"a50527e7-3b38-471d-a03d-937e88e019f3\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.804515 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: 
\"kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhwkh\" (UID: \"a50527e7-3b38-471d-a03d-937e88e019f3\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.804613 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhwkh\" (UID: \"a50527e7-3b38-471d-a03d-937e88e019f3\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.804657 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j6z9z\" (UniqueName: \"kubernetes.io/projected/a50527e7-3b38-471d-a03d-937e88e019f3-kube-api-access-j6z9z\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhwkh\" (UID: \"a50527e7-3b38-471d-a03d-937e88e019f3\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.804694 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhwkh\" (UID: \"a50527e7-3b38-471d-a03d-937e88e019f3\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.804741 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhwkh\" (UID: \"a50527e7-3b38-471d-a03d-937e88e019f3\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.805161 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhwkh\" (UID: \"a50527e7-3b38-471d-a03d-937e88e019f3\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.807350 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/a50527e7-3b38-471d-a03d-937e88e019f3-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhwkh\" (UID: \"a50527e7-3b38-471d-a03d-937e88e019f3\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.810550 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhwkh\" (UID: \"a50527e7-3b38-471d-a03d-937e88e019f3\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.811229 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-nova-migration-ssh-key-1\") pod 
\"nova-edpm-deployment-openstack-edpm-ipam-rhwkh\" (UID: \"a50527e7-3b38-471d-a03d-937e88e019f3\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.812431 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhwkh\" (UID: \"a50527e7-3b38-471d-a03d-937e88e019f3\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.813424 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhwkh\" (UID: \"a50527e7-3b38-471d-a03d-937e88e019f3\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.813896 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhwkh\" (UID: \"a50527e7-3b38-471d-a03d-937e88e019f3\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.821747 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhwkh\" (UID: \"a50527e7-3b38-471d-a03d-937e88e019f3\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.822346 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhwkh\" (UID: \"a50527e7-3b38-471d-a03d-937e88e019f3\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.832288 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j6z9z\" (UniqueName: \"kubernetes.io/projected/a50527e7-3b38-471d-a03d-937e88e019f3-kube-api-access-j6z9z\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rhwkh\" (UID: \"a50527e7-3b38-471d-a03d-937e88e019f3\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh" Nov 21 19:43:33 crc kubenswrapper[4701]: I1121 19:43:33.896508 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh" Nov 21 19:43:34 crc kubenswrapper[4701]: I1121 19:43:34.558133 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh"] Nov 21 19:43:34 crc kubenswrapper[4701]: I1121 19:43:34.566146 4701 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 19:43:35 crc kubenswrapper[4701]: I1121 19:43:35.410515 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh" event={"ID":"a50527e7-3b38-471d-a03d-937e88e019f3","Type":"ContainerStarted","Data":"137873829e7f00f9aff410a4049f1cb1779fba078380d1e17acc630ca5ebfca4"} Nov 21 19:43:35 crc kubenswrapper[4701]: I1121 19:43:35.411120 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh" event={"ID":"a50527e7-3b38-471d-a03d-937e88e019f3","Type":"ContainerStarted","Data":"947a26a0cd458dae8ee84cc79b9a2de46e1be93fe202a7e72312e5cc11c0050b"} Nov 21 19:43:35 crc kubenswrapper[4701]: I1121 19:43:35.446670 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh" podStartSLOduration=2.022313177 podStartE2EDuration="2.446635345s" podCreationTimestamp="2025-11-21 19:43:33 +0000 UTC" firstStartedPulling="2025-11-21 19:43:34.565833204 +0000 UTC m=+2505.350973231" lastFinishedPulling="2025-11-21 19:43:34.990155362 +0000 UTC m=+2505.775295399" observedRunningTime="2025-11-21 19:43:35.43216413 +0000 UTC m=+2506.217304167" watchObservedRunningTime="2025-11-21 19:43:35.446635345 +0000 UTC m=+2506.231775382" Nov 21 19:43:45 crc kubenswrapper[4701]: I1121 19:43:45.952007 4701 scope.go:117] "RemoveContainer" containerID="9293b5a2479f8a37b25cfffe8c63f12e41d202a81f7c28415703d6e5f527b560" Nov 21 19:43:45 crc kubenswrapper[4701]: E1121 19:43:45.953172 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:43:56 crc kubenswrapper[4701]: I1121 19:43:56.951991 4701 scope.go:117] "RemoveContainer" containerID="9293b5a2479f8a37b25cfffe8c63f12e41d202a81f7c28415703d6e5f527b560" Nov 21 19:43:56 crc kubenswrapper[4701]: E1121 19:43:56.953094 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:44:08 crc kubenswrapper[4701]: I1121 19:44:08.952524 4701 scope.go:117] "RemoveContainer" containerID="9293b5a2479f8a37b25cfffe8c63f12e41d202a81f7c28415703d6e5f527b560" Nov 21 19:44:08 crc kubenswrapper[4701]: E1121 19:44:08.953830 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:44:22 crc kubenswrapper[4701]: I1121 19:44:22.952341 4701 scope.go:117] "RemoveContainer" containerID="9293b5a2479f8a37b25cfffe8c63f12e41d202a81f7c28415703d6e5f527b560" Nov 21 19:44:22 crc kubenswrapper[4701]: E1121 19:44:22.955902 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:44:35 crc kubenswrapper[4701]: I1121 19:44:35.951770 4701 scope.go:117] "RemoveContainer" containerID="9293b5a2479f8a37b25cfffe8c63f12e41d202a81f7c28415703d6e5f527b560" Nov 21 19:44:35 crc kubenswrapper[4701]: E1121 19:44:35.953267 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:44:48 crc kubenswrapper[4701]: I1121 19:44:48.951304 4701 scope.go:117] "RemoveContainer" containerID="9293b5a2479f8a37b25cfffe8c63f12e41d202a81f7c28415703d6e5f527b560" Nov 21 19:44:49 crc kubenswrapper[4701]: I1121 19:44:49.387087 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerStarted","Data":"1da90685f868faf9a72c4d22ec36474fb36df1e592760b3aafb20f1467e23cc0"} Nov 21 19:45:00 crc kubenswrapper[4701]: I1121 19:45:00.178874 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395905-v6sr6"] Nov 21 19:45:00 crc kubenswrapper[4701]: I1121 19:45:00.184267 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395905-v6sr6" Nov 21 19:45:00 crc kubenswrapper[4701]: I1121 19:45:00.188913 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 21 19:45:00 crc kubenswrapper[4701]: I1121 19:45:00.189108 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 21 19:45:00 crc kubenswrapper[4701]: I1121 19:45:00.204107 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395905-v6sr6"] Nov 21 19:45:00 crc kubenswrapper[4701]: I1121 19:45:00.275821 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/210c4103-e8cf-4b16-bb3b-2363cb5d24e6-secret-volume\") pod \"collect-profiles-29395905-v6sr6\" (UID: \"210c4103-e8cf-4b16-bb3b-2363cb5d24e6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395905-v6sr6" Nov 21 19:45:00 crc kubenswrapper[4701]: I1121 19:45:00.275920 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-df25n\" (UniqueName: \"kubernetes.io/projected/210c4103-e8cf-4b16-bb3b-2363cb5d24e6-kube-api-access-df25n\") pod \"collect-profiles-29395905-v6sr6\" (UID: \"210c4103-e8cf-4b16-bb3b-2363cb5d24e6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395905-v6sr6" Nov 21 19:45:00 crc kubenswrapper[4701]: I1121 19:45:00.275947 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/210c4103-e8cf-4b16-bb3b-2363cb5d24e6-config-volume\") pod \"collect-profiles-29395905-v6sr6\" (UID: \"210c4103-e8cf-4b16-bb3b-2363cb5d24e6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395905-v6sr6" Nov 21 19:45:00 crc kubenswrapper[4701]: I1121 19:45:00.378812 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/210c4103-e8cf-4b16-bb3b-2363cb5d24e6-secret-volume\") pod \"collect-profiles-29395905-v6sr6\" (UID: \"210c4103-e8cf-4b16-bb3b-2363cb5d24e6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395905-v6sr6" Nov 21 19:45:00 crc kubenswrapper[4701]: I1121 19:45:00.378973 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-df25n\" (UniqueName: \"kubernetes.io/projected/210c4103-e8cf-4b16-bb3b-2363cb5d24e6-kube-api-access-df25n\") pod \"collect-profiles-29395905-v6sr6\" (UID: \"210c4103-e8cf-4b16-bb3b-2363cb5d24e6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395905-v6sr6" Nov 21 19:45:00 crc kubenswrapper[4701]: I1121 19:45:00.379017 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/210c4103-e8cf-4b16-bb3b-2363cb5d24e6-config-volume\") pod \"collect-profiles-29395905-v6sr6\" (UID: \"210c4103-e8cf-4b16-bb3b-2363cb5d24e6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395905-v6sr6" Nov 21 19:45:00 crc kubenswrapper[4701]: I1121 19:45:00.380392 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/210c4103-e8cf-4b16-bb3b-2363cb5d24e6-config-volume\") pod 
\"collect-profiles-29395905-v6sr6\" (UID: \"210c4103-e8cf-4b16-bb3b-2363cb5d24e6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395905-v6sr6" Nov 21 19:45:00 crc kubenswrapper[4701]: I1121 19:45:00.395047 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/210c4103-e8cf-4b16-bb3b-2363cb5d24e6-secret-volume\") pod \"collect-profiles-29395905-v6sr6\" (UID: \"210c4103-e8cf-4b16-bb3b-2363cb5d24e6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395905-v6sr6" Nov 21 19:45:00 crc kubenswrapper[4701]: I1121 19:45:00.400370 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-df25n\" (UniqueName: \"kubernetes.io/projected/210c4103-e8cf-4b16-bb3b-2363cb5d24e6-kube-api-access-df25n\") pod \"collect-profiles-29395905-v6sr6\" (UID: \"210c4103-e8cf-4b16-bb3b-2363cb5d24e6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395905-v6sr6" Nov 21 19:45:00 crc kubenswrapper[4701]: I1121 19:45:00.538914 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395905-v6sr6" Nov 21 19:45:01 crc kubenswrapper[4701]: I1121 19:45:01.087731 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395905-v6sr6"] Nov 21 19:45:01 crc kubenswrapper[4701]: W1121 19:45:01.109564 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod210c4103_e8cf_4b16_bb3b_2363cb5d24e6.slice/crio-1cb762974a290cb356f0d0a759afc81df6112236280278304514ed99bf1a91d6 WatchSource:0}: Error finding container 1cb762974a290cb356f0d0a759afc81df6112236280278304514ed99bf1a91d6: Status 404 returned error can't find the container with id 1cb762974a290cb356f0d0a759afc81df6112236280278304514ed99bf1a91d6 Nov 21 19:45:01 crc kubenswrapper[4701]: I1121 19:45:01.541219 4701 generic.go:334] "Generic (PLEG): container finished" podID="210c4103-e8cf-4b16-bb3b-2363cb5d24e6" containerID="5a4aac1587422cb8ed11599ae79ddc74b896e58b323d71c549d742191a31c9bc" exitCode=0 Nov 21 19:45:01 crc kubenswrapper[4701]: I1121 19:45:01.541614 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395905-v6sr6" event={"ID":"210c4103-e8cf-4b16-bb3b-2363cb5d24e6","Type":"ContainerDied","Data":"5a4aac1587422cb8ed11599ae79ddc74b896e58b323d71c549d742191a31c9bc"} Nov 21 19:45:01 crc kubenswrapper[4701]: I1121 19:45:01.541649 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395905-v6sr6" event={"ID":"210c4103-e8cf-4b16-bb3b-2363cb5d24e6","Type":"ContainerStarted","Data":"1cb762974a290cb356f0d0a759afc81df6112236280278304514ed99bf1a91d6"} Nov 21 19:45:02 crc kubenswrapper[4701]: I1121 19:45:02.983743 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395905-v6sr6" Nov 21 19:45:03 crc kubenswrapper[4701]: I1121 19:45:03.048141 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/210c4103-e8cf-4b16-bb3b-2363cb5d24e6-config-volume\") pod \"210c4103-e8cf-4b16-bb3b-2363cb5d24e6\" (UID: \"210c4103-e8cf-4b16-bb3b-2363cb5d24e6\") " Nov 21 19:45:03 crc kubenswrapper[4701]: I1121 19:45:03.048216 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-df25n\" (UniqueName: \"kubernetes.io/projected/210c4103-e8cf-4b16-bb3b-2363cb5d24e6-kube-api-access-df25n\") pod \"210c4103-e8cf-4b16-bb3b-2363cb5d24e6\" (UID: \"210c4103-e8cf-4b16-bb3b-2363cb5d24e6\") " Nov 21 19:45:03 crc kubenswrapper[4701]: I1121 19:45:03.048455 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/210c4103-e8cf-4b16-bb3b-2363cb5d24e6-secret-volume\") pod \"210c4103-e8cf-4b16-bb3b-2363cb5d24e6\" (UID: \"210c4103-e8cf-4b16-bb3b-2363cb5d24e6\") " Nov 21 19:45:03 crc kubenswrapper[4701]: I1121 19:45:03.049677 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210c4103-e8cf-4b16-bb3b-2363cb5d24e6-config-volume" (OuterVolumeSpecName: "config-volume") pod "210c4103-e8cf-4b16-bb3b-2363cb5d24e6" (UID: "210c4103-e8cf-4b16-bb3b-2363cb5d24e6"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:45:03 crc kubenswrapper[4701]: I1121 19:45:03.060508 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210c4103-e8cf-4b16-bb3b-2363cb5d24e6-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "210c4103-e8cf-4b16-bb3b-2363cb5d24e6" (UID: "210c4103-e8cf-4b16-bb3b-2363cb5d24e6"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:45:03 crc kubenswrapper[4701]: I1121 19:45:03.060752 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210c4103-e8cf-4b16-bb3b-2363cb5d24e6-kube-api-access-df25n" (OuterVolumeSpecName: "kube-api-access-df25n") pod "210c4103-e8cf-4b16-bb3b-2363cb5d24e6" (UID: "210c4103-e8cf-4b16-bb3b-2363cb5d24e6"). InnerVolumeSpecName "kube-api-access-df25n". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:45:03 crc kubenswrapper[4701]: I1121 19:45:03.151685 4701 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/210c4103-e8cf-4b16-bb3b-2363cb5d24e6-config-volume\") on node \"crc\" DevicePath \"\"" Nov 21 19:45:03 crc kubenswrapper[4701]: I1121 19:45:03.151729 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-df25n\" (UniqueName: \"kubernetes.io/projected/210c4103-e8cf-4b16-bb3b-2363cb5d24e6-kube-api-access-df25n\") on node \"crc\" DevicePath \"\"" Nov 21 19:45:03 crc kubenswrapper[4701]: I1121 19:45:03.151745 4701 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/210c4103-e8cf-4b16-bb3b-2363cb5d24e6-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 21 19:45:03 crc kubenswrapper[4701]: I1121 19:45:03.575139 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395905-v6sr6" event={"ID":"210c4103-e8cf-4b16-bb3b-2363cb5d24e6","Type":"ContainerDied","Data":"1cb762974a290cb356f0d0a759afc81df6112236280278304514ed99bf1a91d6"} Nov 21 19:45:03 crc kubenswrapper[4701]: I1121 19:45:03.575687 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1cb762974a290cb356f0d0a759afc81df6112236280278304514ed99bf1a91d6" Nov 21 19:45:03 crc kubenswrapper[4701]: I1121 19:45:03.575450 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395905-v6sr6" Nov 21 19:45:04 crc kubenswrapper[4701]: I1121 19:45:04.109528 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395860-72qpb"] Nov 21 19:45:04 crc kubenswrapper[4701]: I1121 19:45:04.123786 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395860-72qpb"] Nov 21 19:45:05 crc kubenswrapper[4701]: I1121 19:45:05.988646 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="276e2cb3-e02e-4122-b10b-a454198b7954" path="/var/lib/kubelet/pods/276e2cb3-e02e-4122-b10b-a454198b7954/volumes" Nov 21 19:45:10 crc kubenswrapper[4701]: I1121 19:45:10.548623 4701 scope.go:117] "RemoveContainer" containerID="43c55175c367845afbb71d4d61ac30931c3eaa6f41b08f77af904180bcecf56f" Nov 21 19:46:23 crc kubenswrapper[4701]: I1121 19:46:23.322641 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-2wkdb"] Nov 21 19:46:23 crc kubenswrapper[4701]: E1121 19:46:23.324724 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="210c4103-e8cf-4b16-bb3b-2363cb5d24e6" containerName="collect-profiles" Nov 21 19:46:23 crc kubenswrapper[4701]: I1121 19:46:23.324762 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="210c4103-e8cf-4b16-bb3b-2363cb5d24e6" containerName="collect-profiles" Nov 21 19:46:23 crc kubenswrapper[4701]: I1121 19:46:23.325163 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="210c4103-e8cf-4b16-bb3b-2363cb5d24e6" containerName="collect-profiles" Nov 21 19:46:23 crc kubenswrapper[4701]: I1121 19:46:23.328800 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-2wkdb" Nov 21 19:46:23 crc kubenswrapper[4701]: I1121 19:46:23.347172 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2wkdb"] Nov 21 19:46:23 crc kubenswrapper[4701]: I1121 19:46:23.370935 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lcrp8\" (UniqueName: \"kubernetes.io/projected/74763d9d-b2ce-41bb-9f17-4ec6f349134a-kube-api-access-lcrp8\") pod \"redhat-operators-2wkdb\" (UID: \"74763d9d-b2ce-41bb-9f17-4ec6f349134a\") " pod="openshift-marketplace/redhat-operators-2wkdb" Nov 21 19:46:23 crc kubenswrapper[4701]: I1121 19:46:23.371013 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/74763d9d-b2ce-41bb-9f17-4ec6f349134a-catalog-content\") pod \"redhat-operators-2wkdb\" (UID: \"74763d9d-b2ce-41bb-9f17-4ec6f349134a\") " pod="openshift-marketplace/redhat-operators-2wkdb" Nov 21 19:46:23 crc kubenswrapper[4701]: I1121 19:46:23.371076 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/74763d9d-b2ce-41bb-9f17-4ec6f349134a-utilities\") pod \"redhat-operators-2wkdb\" (UID: \"74763d9d-b2ce-41bb-9f17-4ec6f349134a\") " pod="openshift-marketplace/redhat-operators-2wkdb" Nov 21 19:46:23 crc kubenswrapper[4701]: I1121 19:46:23.473012 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lcrp8\" (UniqueName: \"kubernetes.io/projected/74763d9d-b2ce-41bb-9f17-4ec6f349134a-kube-api-access-lcrp8\") pod \"redhat-operators-2wkdb\" (UID: \"74763d9d-b2ce-41bb-9f17-4ec6f349134a\") " pod="openshift-marketplace/redhat-operators-2wkdb" Nov 21 19:46:23 crc kubenswrapper[4701]: I1121 19:46:23.473091 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/74763d9d-b2ce-41bb-9f17-4ec6f349134a-catalog-content\") pod \"redhat-operators-2wkdb\" (UID: \"74763d9d-b2ce-41bb-9f17-4ec6f349134a\") " pod="openshift-marketplace/redhat-operators-2wkdb" Nov 21 19:46:23 crc kubenswrapper[4701]: I1121 19:46:23.473131 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/74763d9d-b2ce-41bb-9f17-4ec6f349134a-utilities\") pod \"redhat-operators-2wkdb\" (UID: \"74763d9d-b2ce-41bb-9f17-4ec6f349134a\") " pod="openshift-marketplace/redhat-operators-2wkdb" Nov 21 19:46:23 crc kubenswrapper[4701]: I1121 19:46:23.474083 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/74763d9d-b2ce-41bb-9f17-4ec6f349134a-utilities\") pod \"redhat-operators-2wkdb\" (UID: \"74763d9d-b2ce-41bb-9f17-4ec6f349134a\") " pod="openshift-marketplace/redhat-operators-2wkdb" Nov 21 19:46:23 crc kubenswrapper[4701]: I1121 19:46:23.474272 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/74763d9d-b2ce-41bb-9f17-4ec6f349134a-catalog-content\") pod \"redhat-operators-2wkdb\" (UID: \"74763d9d-b2ce-41bb-9f17-4ec6f349134a\") " pod="openshift-marketplace/redhat-operators-2wkdb" Nov 21 19:46:23 crc kubenswrapper[4701]: I1121 19:46:23.496294 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-lcrp8\" (UniqueName: \"kubernetes.io/projected/74763d9d-b2ce-41bb-9f17-4ec6f349134a-kube-api-access-lcrp8\") pod \"redhat-operators-2wkdb\" (UID: \"74763d9d-b2ce-41bb-9f17-4ec6f349134a\") " pod="openshift-marketplace/redhat-operators-2wkdb" Nov 21 19:46:23 crc kubenswrapper[4701]: I1121 19:46:23.659982 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2wkdb" Nov 21 19:46:24 crc kubenswrapper[4701]: I1121 19:46:24.191424 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2wkdb"] Nov 21 19:46:25 crc kubenswrapper[4701]: I1121 19:46:25.137695 4701 generic.go:334] "Generic (PLEG): container finished" podID="74763d9d-b2ce-41bb-9f17-4ec6f349134a" containerID="3801b324bf06b8b0466b800a3ba481520be42f471acb51e1348e915e1a681f56" exitCode=0 Nov 21 19:46:25 crc kubenswrapper[4701]: I1121 19:46:25.137841 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2wkdb" event={"ID":"74763d9d-b2ce-41bb-9f17-4ec6f349134a","Type":"ContainerDied","Data":"3801b324bf06b8b0466b800a3ba481520be42f471acb51e1348e915e1a681f56"} Nov 21 19:46:25 crc kubenswrapper[4701]: I1121 19:46:25.138438 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2wkdb" event={"ID":"74763d9d-b2ce-41bb-9f17-4ec6f349134a","Type":"ContainerStarted","Data":"bf3715c51ea6d4bf48e54a50914f8129fa537d17b01c3f9f691b94f5637ded25"} Nov 21 19:46:27 crc kubenswrapper[4701]: I1121 19:46:27.163596 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2wkdb" event={"ID":"74763d9d-b2ce-41bb-9f17-4ec6f349134a","Type":"ContainerStarted","Data":"3a3c788de75474c93a06653edc46abc5d4855577230a155c9edbac13abcc6d8a"} Nov 21 19:46:30 crc kubenswrapper[4701]: I1121 19:46:30.207260 4701 generic.go:334] "Generic (PLEG): container finished" podID="74763d9d-b2ce-41bb-9f17-4ec6f349134a" containerID="3a3c788de75474c93a06653edc46abc5d4855577230a155c9edbac13abcc6d8a" exitCode=0 Nov 21 19:46:30 crc kubenswrapper[4701]: I1121 19:46:30.207380 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2wkdb" event={"ID":"74763d9d-b2ce-41bb-9f17-4ec6f349134a","Type":"ContainerDied","Data":"3a3c788de75474c93a06653edc46abc5d4855577230a155c9edbac13abcc6d8a"} Nov 21 19:46:31 crc kubenswrapper[4701]: I1121 19:46:31.240267 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2wkdb" event={"ID":"74763d9d-b2ce-41bb-9f17-4ec6f349134a","Type":"ContainerStarted","Data":"34e74e6416900092cd47b30251d2555821513ef755bbb34cdc0df113c1eb5313"} Nov 21 19:46:31 crc kubenswrapper[4701]: I1121 19:46:31.268684 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-2wkdb" podStartSLOduration=2.779650925 podStartE2EDuration="8.268651294s" podCreationTimestamp="2025-11-21 19:46:23 +0000 UTC" firstStartedPulling="2025-11-21 19:46:25.140509034 +0000 UTC m=+2675.925649101" lastFinishedPulling="2025-11-21 19:46:30.629509413 +0000 UTC m=+2681.414649470" observedRunningTime="2025-11-21 19:46:31.263979788 +0000 UTC m=+2682.049119815" watchObservedRunningTime="2025-11-21 19:46:31.268651294 +0000 UTC m=+2682.053791361" Nov 21 19:46:33 crc kubenswrapper[4701]: I1121 19:46:33.660291 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-2wkdb" 
Nov 21 19:46:33 crc kubenswrapper[4701]: I1121 19:46:33.660763 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-2wkdb" Nov 21 19:46:34 crc kubenswrapper[4701]: I1121 19:46:34.711968 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-2wkdb" podUID="74763d9d-b2ce-41bb-9f17-4ec6f349134a" containerName="registry-server" probeResult="failure" output=< Nov 21 19:46:34 crc kubenswrapper[4701]: timeout: failed to connect service ":50051" within 1s Nov 21 19:46:34 crc kubenswrapper[4701]: > Nov 21 19:46:43 crc kubenswrapper[4701]: I1121 19:46:43.728594 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-2wkdb" Nov 21 19:46:43 crc kubenswrapper[4701]: I1121 19:46:43.793965 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-2wkdb" Nov 21 19:46:43 crc kubenswrapper[4701]: I1121 19:46:43.979600 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2wkdb"] Nov 21 19:46:45 crc kubenswrapper[4701]: I1121 19:46:45.422678 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-2wkdb" podUID="74763d9d-b2ce-41bb-9f17-4ec6f349134a" containerName="registry-server" containerID="cri-o://34e74e6416900092cd47b30251d2555821513ef755bbb34cdc0df113c1eb5313" gracePeriod=2 Nov 21 19:46:45 crc kubenswrapper[4701]: I1121 19:46:45.993450 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2wkdb" Nov 21 19:46:46 crc kubenswrapper[4701]: I1121 19:46:46.147898 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/74763d9d-b2ce-41bb-9f17-4ec6f349134a-catalog-content\") pod \"74763d9d-b2ce-41bb-9f17-4ec6f349134a\" (UID: \"74763d9d-b2ce-41bb-9f17-4ec6f349134a\") " Nov 21 19:46:46 crc kubenswrapper[4701]: I1121 19:46:46.148724 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/74763d9d-b2ce-41bb-9f17-4ec6f349134a-utilities\") pod \"74763d9d-b2ce-41bb-9f17-4ec6f349134a\" (UID: \"74763d9d-b2ce-41bb-9f17-4ec6f349134a\") " Nov 21 19:46:46 crc kubenswrapper[4701]: I1121 19:46:46.148846 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lcrp8\" (UniqueName: \"kubernetes.io/projected/74763d9d-b2ce-41bb-9f17-4ec6f349134a-kube-api-access-lcrp8\") pod \"74763d9d-b2ce-41bb-9f17-4ec6f349134a\" (UID: \"74763d9d-b2ce-41bb-9f17-4ec6f349134a\") " Nov 21 19:46:46 crc kubenswrapper[4701]: I1121 19:46:46.149818 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/74763d9d-b2ce-41bb-9f17-4ec6f349134a-utilities" (OuterVolumeSpecName: "utilities") pod "74763d9d-b2ce-41bb-9f17-4ec6f349134a" (UID: "74763d9d-b2ce-41bb-9f17-4ec6f349134a"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:46:46 crc kubenswrapper[4701]: I1121 19:46:46.160611 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/74763d9d-b2ce-41bb-9f17-4ec6f349134a-kube-api-access-lcrp8" (OuterVolumeSpecName: "kube-api-access-lcrp8") pod "74763d9d-b2ce-41bb-9f17-4ec6f349134a" (UID: "74763d9d-b2ce-41bb-9f17-4ec6f349134a"). InnerVolumeSpecName "kube-api-access-lcrp8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:46:46 crc kubenswrapper[4701]: I1121 19:46:46.252186 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/74763d9d-b2ce-41bb-9f17-4ec6f349134a-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 19:46:46 crc kubenswrapper[4701]: I1121 19:46:46.252240 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lcrp8\" (UniqueName: \"kubernetes.io/projected/74763d9d-b2ce-41bb-9f17-4ec6f349134a-kube-api-access-lcrp8\") on node \"crc\" DevicePath \"\"" Nov 21 19:46:46 crc kubenswrapper[4701]: I1121 19:46:46.266252 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/74763d9d-b2ce-41bb-9f17-4ec6f349134a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "74763d9d-b2ce-41bb-9f17-4ec6f349134a" (UID: "74763d9d-b2ce-41bb-9f17-4ec6f349134a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:46:46 crc kubenswrapper[4701]: I1121 19:46:46.354155 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/74763d9d-b2ce-41bb-9f17-4ec6f349134a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 19:46:46 crc kubenswrapper[4701]: I1121 19:46:46.439603 4701 generic.go:334] "Generic (PLEG): container finished" podID="74763d9d-b2ce-41bb-9f17-4ec6f349134a" containerID="34e74e6416900092cd47b30251d2555821513ef755bbb34cdc0df113c1eb5313" exitCode=0 Nov 21 19:46:46 crc kubenswrapper[4701]: I1121 19:46:46.439656 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2wkdb" event={"ID":"74763d9d-b2ce-41bb-9f17-4ec6f349134a","Type":"ContainerDied","Data":"34e74e6416900092cd47b30251d2555821513ef755bbb34cdc0df113c1eb5313"} Nov 21 19:46:46 crc kubenswrapper[4701]: I1121 19:46:46.439694 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2wkdb" event={"ID":"74763d9d-b2ce-41bb-9f17-4ec6f349134a","Type":"ContainerDied","Data":"bf3715c51ea6d4bf48e54a50914f8129fa537d17b01c3f9f691b94f5637ded25"} Nov 21 19:46:46 crc kubenswrapper[4701]: I1121 19:46:46.439721 4701 scope.go:117] "RemoveContainer" containerID="34e74e6416900092cd47b30251d2555821513ef755bbb34cdc0df113c1eb5313" Nov 21 19:46:46 crc kubenswrapper[4701]: I1121 19:46:46.439818 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-2wkdb" Nov 21 19:46:46 crc kubenswrapper[4701]: I1121 19:46:46.482989 4701 scope.go:117] "RemoveContainer" containerID="3a3c788de75474c93a06653edc46abc5d4855577230a155c9edbac13abcc6d8a" Nov 21 19:46:46 crc kubenswrapper[4701]: I1121 19:46:46.536608 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2wkdb"] Nov 21 19:46:46 crc kubenswrapper[4701]: I1121 19:46:46.538078 4701 scope.go:117] "RemoveContainer" containerID="3801b324bf06b8b0466b800a3ba481520be42f471acb51e1348e915e1a681f56" Nov 21 19:46:46 crc kubenswrapper[4701]: I1121 19:46:46.553269 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-2wkdb"] Nov 21 19:46:46 crc kubenswrapper[4701]: I1121 19:46:46.584530 4701 scope.go:117] "RemoveContainer" containerID="34e74e6416900092cd47b30251d2555821513ef755bbb34cdc0df113c1eb5313" Nov 21 19:46:46 crc kubenswrapper[4701]: E1121 19:46:46.585048 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"34e74e6416900092cd47b30251d2555821513ef755bbb34cdc0df113c1eb5313\": container with ID starting with 34e74e6416900092cd47b30251d2555821513ef755bbb34cdc0df113c1eb5313 not found: ID does not exist" containerID="34e74e6416900092cd47b30251d2555821513ef755bbb34cdc0df113c1eb5313" Nov 21 19:46:46 crc kubenswrapper[4701]: I1121 19:46:46.585098 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"34e74e6416900092cd47b30251d2555821513ef755bbb34cdc0df113c1eb5313"} err="failed to get container status \"34e74e6416900092cd47b30251d2555821513ef755bbb34cdc0df113c1eb5313\": rpc error: code = NotFound desc = could not find container \"34e74e6416900092cd47b30251d2555821513ef755bbb34cdc0df113c1eb5313\": container with ID starting with 34e74e6416900092cd47b30251d2555821513ef755bbb34cdc0df113c1eb5313 not found: ID does not exist" Nov 21 19:46:46 crc kubenswrapper[4701]: I1121 19:46:46.585147 4701 scope.go:117] "RemoveContainer" containerID="3a3c788de75474c93a06653edc46abc5d4855577230a155c9edbac13abcc6d8a" Nov 21 19:46:46 crc kubenswrapper[4701]: E1121 19:46:46.585690 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3a3c788de75474c93a06653edc46abc5d4855577230a155c9edbac13abcc6d8a\": container with ID starting with 3a3c788de75474c93a06653edc46abc5d4855577230a155c9edbac13abcc6d8a not found: ID does not exist" containerID="3a3c788de75474c93a06653edc46abc5d4855577230a155c9edbac13abcc6d8a" Nov 21 19:46:46 crc kubenswrapper[4701]: I1121 19:46:46.585740 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3a3c788de75474c93a06653edc46abc5d4855577230a155c9edbac13abcc6d8a"} err="failed to get container status \"3a3c788de75474c93a06653edc46abc5d4855577230a155c9edbac13abcc6d8a\": rpc error: code = NotFound desc = could not find container \"3a3c788de75474c93a06653edc46abc5d4855577230a155c9edbac13abcc6d8a\": container with ID starting with 3a3c788de75474c93a06653edc46abc5d4855577230a155c9edbac13abcc6d8a not found: ID does not exist" Nov 21 19:46:46 crc kubenswrapper[4701]: I1121 19:46:46.585772 4701 scope.go:117] "RemoveContainer" containerID="3801b324bf06b8b0466b800a3ba481520be42f471acb51e1348e915e1a681f56" Nov 21 19:46:46 crc kubenswrapper[4701]: E1121 19:46:46.586031 4701 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"3801b324bf06b8b0466b800a3ba481520be42f471acb51e1348e915e1a681f56\": container with ID starting with 3801b324bf06b8b0466b800a3ba481520be42f471acb51e1348e915e1a681f56 not found: ID does not exist" containerID="3801b324bf06b8b0466b800a3ba481520be42f471acb51e1348e915e1a681f56" Nov 21 19:46:46 crc kubenswrapper[4701]: I1121 19:46:46.586054 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3801b324bf06b8b0466b800a3ba481520be42f471acb51e1348e915e1a681f56"} err="failed to get container status \"3801b324bf06b8b0466b800a3ba481520be42f471acb51e1348e915e1a681f56\": rpc error: code = NotFound desc = could not find container \"3801b324bf06b8b0466b800a3ba481520be42f471acb51e1348e915e1a681f56\": container with ID starting with 3801b324bf06b8b0466b800a3ba481520be42f471acb51e1348e915e1a681f56 not found: ID does not exist" Nov 21 19:46:47 crc kubenswrapper[4701]: I1121 19:46:47.973663 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="74763d9d-b2ce-41bb-9f17-4ec6f349134a" path="/var/lib/kubelet/pods/74763d9d-b2ce-41bb-9f17-4ec6f349134a/volumes" Nov 21 19:47:05 crc kubenswrapper[4701]: I1121 19:47:05.699621 4701 generic.go:334] "Generic (PLEG): container finished" podID="a50527e7-3b38-471d-a03d-937e88e019f3" containerID="137873829e7f00f9aff410a4049f1cb1779fba078380d1e17acc630ca5ebfca4" exitCode=0 Nov 21 19:47:05 crc kubenswrapper[4701]: I1121 19:47:05.699746 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh" event={"ID":"a50527e7-3b38-471d-a03d-937e88e019f3","Type":"ContainerDied","Data":"137873829e7f00f9aff410a4049f1cb1779fba078380d1e17acc630ca5ebfca4"} Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.275427 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh" Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.351069 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-nova-combined-ca-bundle\") pod \"a50527e7-3b38-471d-a03d-937e88e019f3\" (UID: \"a50527e7-3b38-471d-a03d-937e88e019f3\") " Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.351121 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-nova-migration-ssh-key-1\") pod \"a50527e7-3b38-471d-a03d-937e88e019f3\" (UID: \"a50527e7-3b38-471d-a03d-937e88e019f3\") " Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.351276 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-nova-migration-ssh-key-0\") pod \"a50527e7-3b38-471d-a03d-937e88e019f3\" (UID: \"a50527e7-3b38-471d-a03d-937e88e019f3\") " Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.351420 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-nova-cell1-compute-config-1\") pod \"a50527e7-3b38-471d-a03d-937e88e019f3\" (UID: \"a50527e7-3b38-471d-a03d-937e88e019f3\") " Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.351446 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/a50527e7-3b38-471d-a03d-937e88e019f3-nova-extra-config-0\") pod \"a50527e7-3b38-471d-a03d-937e88e019f3\" (UID: \"a50527e7-3b38-471d-a03d-937e88e019f3\") " Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.351518 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j6z9z\" (UniqueName: \"kubernetes.io/projected/a50527e7-3b38-471d-a03d-937e88e019f3-kube-api-access-j6z9z\") pod \"a50527e7-3b38-471d-a03d-937e88e019f3\" (UID: \"a50527e7-3b38-471d-a03d-937e88e019f3\") " Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.351546 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-ssh-key\") pod \"a50527e7-3b38-471d-a03d-937e88e019f3\" (UID: \"a50527e7-3b38-471d-a03d-937e88e019f3\") " Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.351615 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-nova-cell1-compute-config-0\") pod \"a50527e7-3b38-471d-a03d-937e88e019f3\" (UID: \"a50527e7-3b38-471d-a03d-937e88e019f3\") " Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.351650 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-inventory\") pod \"a50527e7-3b38-471d-a03d-937e88e019f3\" (UID: \"a50527e7-3b38-471d-a03d-937e88e019f3\") " Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.369787 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "a50527e7-3b38-471d-a03d-937e88e019f3" (UID: "a50527e7-3b38-471d-a03d-937e88e019f3"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.372397 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a50527e7-3b38-471d-a03d-937e88e019f3-kube-api-access-j6z9z" (OuterVolumeSpecName: "kube-api-access-j6z9z") pod "a50527e7-3b38-471d-a03d-937e88e019f3" (UID: "a50527e7-3b38-471d-a03d-937e88e019f3"). InnerVolumeSpecName "kube-api-access-j6z9z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.394901 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a50527e7-3b38-471d-a03d-937e88e019f3-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "a50527e7-3b38-471d-a03d-937e88e019f3" (UID: "a50527e7-3b38-471d-a03d-937e88e019f3"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.397036 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-inventory" (OuterVolumeSpecName: "inventory") pod "a50527e7-3b38-471d-a03d-937e88e019f3" (UID: "a50527e7-3b38-471d-a03d-937e88e019f3"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.399992 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "a50527e7-3b38-471d-a03d-937e88e019f3" (UID: "a50527e7-3b38-471d-a03d-937e88e019f3"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.407440 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "a50527e7-3b38-471d-a03d-937e88e019f3" (UID: "a50527e7-3b38-471d-a03d-937e88e019f3"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.409093 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a50527e7-3b38-471d-a03d-937e88e019f3" (UID: "a50527e7-3b38-471d-a03d-937e88e019f3"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.411299 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "a50527e7-3b38-471d-a03d-937e88e019f3" (UID: "a50527e7-3b38-471d-a03d-937e88e019f3"). InnerVolumeSpecName "nova-migration-ssh-key-1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.425137 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "a50527e7-3b38-471d-a03d-937e88e019f3" (UID: "a50527e7-3b38-471d-a03d-937e88e019f3"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.454736 4701 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.454784 4701 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/a50527e7-3b38-471d-a03d-937e88e019f3-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.454794 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j6z9z\" (UniqueName: \"kubernetes.io/projected/a50527e7-3b38-471d-a03d-937e88e019f3-kube-api-access-j6z9z\") on node \"crc\" DevicePath \"\"" Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.454805 4701 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.454815 4701 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.454825 4701 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.454836 4701 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.454848 4701 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.454858 4701 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/a50527e7-3b38-471d-a03d-937e88e019f3-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.729686 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh" event={"ID":"a50527e7-3b38-471d-a03d-937e88e019f3","Type":"ContainerDied","Data":"947a26a0cd458dae8ee84cc79b9a2de46e1be93fe202a7e72312e5cc11c0050b"} Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.730277 4701 pod_container_deletor.go:80] "Container not found in pod's containers" 
containerID="947a26a0cd458dae8ee84cc79b9a2de46e1be93fe202a7e72312e5cc11c0050b" Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.729766 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rhwkh" Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.985994 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-97f7r"] Nov 21 19:47:07 crc kubenswrapper[4701]: E1121 19:47:07.986843 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74763d9d-b2ce-41bb-9f17-4ec6f349134a" containerName="extract-content" Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.986869 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="74763d9d-b2ce-41bb-9f17-4ec6f349134a" containerName="extract-content" Nov 21 19:47:07 crc kubenswrapper[4701]: E1121 19:47:07.986903 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74763d9d-b2ce-41bb-9f17-4ec6f349134a" containerName="registry-server" Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.986917 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="74763d9d-b2ce-41bb-9f17-4ec6f349134a" containerName="registry-server" Nov 21 19:47:07 crc kubenswrapper[4701]: E1121 19:47:07.986949 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a50527e7-3b38-471d-a03d-937e88e019f3" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.986964 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="a50527e7-3b38-471d-a03d-937e88e019f3" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 21 19:47:07 crc kubenswrapper[4701]: E1121 19:47:07.986979 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74763d9d-b2ce-41bb-9f17-4ec6f349134a" containerName="extract-utilities" Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.986991 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="74763d9d-b2ce-41bb-9f17-4ec6f349134a" containerName="extract-utilities" Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.987402 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="74763d9d-b2ce-41bb-9f17-4ec6f349134a" containerName="registry-server" Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.987448 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="a50527e7-3b38-471d-a03d-937e88e019f3" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.988924 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-97f7r" Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.994827 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hqsfp" Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.994856 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.994827 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.996720 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Nov 21 19:47:07 crc kubenswrapper[4701]: I1121 19:47:07.998931 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 19:47:08 crc kubenswrapper[4701]: I1121 19:47:08.007913 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-97f7r"] Nov 21 19:47:08 crc kubenswrapper[4701]: I1121 19:47:08.069573 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/4f066a0d-46d8-4cfd-b188-495f77c256f1-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-97f7r\" (UID: \"4f066a0d-46d8-4cfd-b188-495f77c256f1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-97f7r" Nov 21 19:47:08 crc kubenswrapper[4701]: I1121 19:47:08.069760 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f066a0d-46d8-4cfd-b188-495f77c256f1-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-97f7r\" (UID: \"4f066a0d-46d8-4cfd-b188-495f77c256f1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-97f7r" Nov 21 19:47:08 crc kubenswrapper[4701]: I1121 19:47:08.069824 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4f066a0d-46d8-4cfd-b188-495f77c256f1-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-97f7r\" (UID: \"4f066a0d-46d8-4cfd-b188-495f77c256f1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-97f7r" Nov 21 19:47:08 crc kubenswrapper[4701]: I1121 19:47:08.069888 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4f066a0d-46d8-4cfd-b188-495f77c256f1-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-97f7r\" (UID: \"4f066a0d-46d8-4cfd-b188-495f77c256f1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-97f7r" Nov 21 19:47:08 crc kubenswrapper[4701]: I1121 19:47:08.069930 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/4f066a0d-46d8-4cfd-b188-495f77c256f1-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-97f7r\" (UID: \"4f066a0d-46d8-4cfd-b188-495f77c256f1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-97f7r" Nov 21 19:47:08 crc kubenswrapper[4701]: I1121 
19:47:08.069978 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/4f066a0d-46d8-4cfd-b188-495f77c256f1-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-97f7r\" (UID: \"4f066a0d-46d8-4cfd-b188-495f77c256f1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-97f7r" Nov 21 19:47:08 crc kubenswrapper[4701]: I1121 19:47:08.070231 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ltldw\" (UniqueName: \"kubernetes.io/projected/4f066a0d-46d8-4cfd-b188-495f77c256f1-kube-api-access-ltldw\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-97f7r\" (UID: \"4f066a0d-46d8-4cfd-b188-495f77c256f1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-97f7r" Nov 21 19:47:08 crc kubenswrapper[4701]: I1121 19:47:08.172255 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4f066a0d-46d8-4cfd-b188-495f77c256f1-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-97f7r\" (UID: \"4f066a0d-46d8-4cfd-b188-495f77c256f1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-97f7r" Nov 21 19:47:08 crc kubenswrapper[4701]: I1121 19:47:08.172354 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/4f066a0d-46d8-4cfd-b188-495f77c256f1-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-97f7r\" (UID: \"4f066a0d-46d8-4cfd-b188-495f77c256f1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-97f7r" Nov 21 19:47:08 crc kubenswrapper[4701]: I1121 19:47:08.172416 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/4f066a0d-46d8-4cfd-b188-495f77c256f1-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-97f7r\" (UID: \"4f066a0d-46d8-4cfd-b188-495f77c256f1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-97f7r" Nov 21 19:47:08 crc kubenswrapper[4701]: I1121 19:47:08.172449 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ltldw\" (UniqueName: \"kubernetes.io/projected/4f066a0d-46d8-4cfd-b188-495f77c256f1-kube-api-access-ltldw\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-97f7r\" (UID: \"4f066a0d-46d8-4cfd-b188-495f77c256f1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-97f7r" Nov 21 19:47:08 crc kubenswrapper[4701]: I1121 19:47:08.172519 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/4f066a0d-46d8-4cfd-b188-495f77c256f1-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-97f7r\" (UID: \"4f066a0d-46d8-4cfd-b188-495f77c256f1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-97f7r" Nov 21 19:47:08 crc kubenswrapper[4701]: I1121 19:47:08.172580 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f066a0d-46d8-4cfd-b188-495f77c256f1-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-97f7r\" (UID: 
\"4f066a0d-46d8-4cfd-b188-495f77c256f1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-97f7r" Nov 21 19:47:08 crc kubenswrapper[4701]: I1121 19:47:08.172624 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4f066a0d-46d8-4cfd-b188-495f77c256f1-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-97f7r\" (UID: \"4f066a0d-46d8-4cfd-b188-495f77c256f1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-97f7r" Nov 21 19:47:08 crc kubenswrapper[4701]: I1121 19:47:08.176918 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4f066a0d-46d8-4cfd-b188-495f77c256f1-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-97f7r\" (UID: \"4f066a0d-46d8-4cfd-b188-495f77c256f1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-97f7r" Nov 21 19:47:08 crc kubenswrapper[4701]: I1121 19:47:08.176918 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/4f066a0d-46d8-4cfd-b188-495f77c256f1-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-97f7r\" (UID: \"4f066a0d-46d8-4cfd-b188-495f77c256f1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-97f7r" Nov 21 19:47:08 crc kubenswrapper[4701]: I1121 19:47:08.177368 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/4f066a0d-46d8-4cfd-b188-495f77c256f1-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-97f7r\" (UID: \"4f066a0d-46d8-4cfd-b188-495f77c256f1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-97f7r" Nov 21 19:47:08 crc kubenswrapper[4701]: I1121 19:47:08.177777 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/4f066a0d-46d8-4cfd-b188-495f77c256f1-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-97f7r\" (UID: \"4f066a0d-46d8-4cfd-b188-495f77c256f1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-97f7r" Nov 21 19:47:08 crc kubenswrapper[4701]: I1121 19:47:08.179165 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f066a0d-46d8-4cfd-b188-495f77c256f1-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-97f7r\" (UID: \"4f066a0d-46d8-4cfd-b188-495f77c256f1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-97f7r" Nov 21 19:47:08 crc kubenswrapper[4701]: I1121 19:47:08.185964 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4f066a0d-46d8-4cfd-b188-495f77c256f1-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-97f7r\" (UID: \"4f066a0d-46d8-4cfd-b188-495f77c256f1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-97f7r" Nov 21 19:47:08 crc kubenswrapper[4701]: I1121 19:47:08.197170 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ltldw\" (UniqueName: \"kubernetes.io/projected/4f066a0d-46d8-4cfd-b188-495f77c256f1-kube-api-access-ltldw\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-97f7r\" (UID: 
\"4f066a0d-46d8-4cfd-b188-495f77c256f1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-97f7r" Nov 21 19:47:08 crc kubenswrapper[4701]: I1121 19:47:08.328967 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-97f7r" Nov 21 19:47:09 crc kubenswrapper[4701]: I1121 19:47:09.025928 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-97f7r"] Nov 21 19:47:09 crc kubenswrapper[4701]: I1121 19:47:09.792899 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-97f7r" event={"ID":"4f066a0d-46d8-4cfd-b188-495f77c256f1","Type":"ContainerStarted","Data":"2f1e2a60b59fa1f94114451e7fb9f9e14cadc972308413c80223773d0d85fb5d"} Nov 21 19:47:10 crc kubenswrapper[4701]: I1121 19:47:10.806840 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-97f7r" event={"ID":"4f066a0d-46d8-4cfd-b188-495f77c256f1","Type":"ContainerStarted","Data":"5792f9518d2ae6501ef4c7dcc655f763c82a7678164c6ac72ddb21af20d17c77"} Nov 21 19:47:10 crc kubenswrapper[4701]: I1121 19:47:10.847828 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-97f7r" podStartSLOduration=3.458042927 podStartE2EDuration="3.847794454s" podCreationTimestamp="2025-11-21 19:47:07 +0000 UTC" firstStartedPulling="2025-11-21 19:47:09.032737081 +0000 UTC m=+2719.817877108" lastFinishedPulling="2025-11-21 19:47:09.422488568 +0000 UTC m=+2720.207628635" observedRunningTime="2025-11-21 19:47:10.846409726 +0000 UTC m=+2721.631549753" watchObservedRunningTime="2025-11-21 19:47:10.847794454 +0000 UTC m=+2721.632934521" Nov 21 19:47:11 crc kubenswrapper[4701]: I1121 19:47:11.164797 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-kt8qj"] Nov 21 19:47:11 crc kubenswrapper[4701]: I1121 19:47:11.169153 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-kt8qj" Nov 21 19:47:11 crc kubenswrapper[4701]: I1121 19:47:11.203391 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kt8qj"] Nov 21 19:47:11 crc kubenswrapper[4701]: I1121 19:47:11.269534 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab7cd4c1-ae18-4a34-a999-59d1cb54c28f-catalog-content\") pod \"certified-operators-kt8qj\" (UID: \"ab7cd4c1-ae18-4a34-a999-59d1cb54c28f\") " pod="openshift-marketplace/certified-operators-kt8qj" Nov 21 19:47:11 crc kubenswrapper[4701]: I1121 19:47:11.269628 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab7cd4c1-ae18-4a34-a999-59d1cb54c28f-utilities\") pod \"certified-operators-kt8qj\" (UID: \"ab7cd4c1-ae18-4a34-a999-59d1cb54c28f\") " pod="openshift-marketplace/certified-operators-kt8qj" Nov 21 19:47:11 crc kubenswrapper[4701]: I1121 19:47:11.269751 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p8d7s\" (UniqueName: \"kubernetes.io/projected/ab7cd4c1-ae18-4a34-a999-59d1cb54c28f-kube-api-access-p8d7s\") pod \"certified-operators-kt8qj\" (UID: \"ab7cd4c1-ae18-4a34-a999-59d1cb54c28f\") " pod="openshift-marketplace/certified-operators-kt8qj" Nov 21 19:47:11 crc kubenswrapper[4701]: I1121 19:47:11.372130 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p8d7s\" (UniqueName: \"kubernetes.io/projected/ab7cd4c1-ae18-4a34-a999-59d1cb54c28f-kube-api-access-p8d7s\") pod \"certified-operators-kt8qj\" (UID: \"ab7cd4c1-ae18-4a34-a999-59d1cb54c28f\") " pod="openshift-marketplace/certified-operators-kt8qj" Nov 21 19:47:11 crc kubenswrapper[4701]: I1121 19:47:11.372319 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab7cd4c1-ae18-4a34-a999-59d1cb54c28f-catalog-content\") pod \"certified-operators-kt8qj\" (UID: \"ab7cd4c1-ae18-4a34-a999-59d1cb54c28f\") " pod="openshift-marketplace/certified-operators-kt8qj" Nov 21 19:47:11 crc kubenswrapper[4701]: I1121 19:47:11.372409 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab7cd4c1-ae18-4a34-a999-59d1cb54c28f-utilities\") pod \"certified-operators-kt8qj\" (UID: \"ab7cd4c1-ae18-4a34-a999-59d1cb54c28f\") " pod="openshift-marketplace/certified-operators-kt8qj" Nov 21 19:47:11 crc kubenswrapper[4701]: I1121 19:47:11.373120 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab7cd4c1-ae18-4a34-a999-59d1cb54c28f-utilities\") pod \"certified-operators-kt8qj\" (UID: \"ab7cd4c1-ae18-4a34-a999-59d1cb54c28f\") " pod="openshift-marketplace/certified-operators-kt8qj" Nov 21 19:47:11 crc kubenswrapper[4701]: I1121 19:47:11.373120 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab7cd4c1-ae18-4a34-a999-59d1cb54c28f-catalog-content\") pod \"certified-operators-kt8qj\" (UID: \"ab7cd4c1-ae18-4a34-a999-59d1cb54c28f\") " pod="openshift-marketplace/certified-operators-kt8qj" Nov 21 19:47:11 crc kubenswrapper[4701]: I1121 19:47:11.395514 4701 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-p8d7s\" (UniqueName: \"kubernetes.io/projected/ab7cd4c1-ae18-4a34-a999-59d1cb54c28f-kube-api-access-p8d7s\") pod \"certified-operators-kt8qj\" (UID: \"ab7cd4c1-ae18-4a34-a999-59d1cb54c28f\") " pod="openshift-marketplace/certified-operators-kt8qj" Nov 21 19:47:11 crc kubenswrapper[4701]: I1121 19:47:11.510680 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kt8qj" Nov 21 19:47:12 crc kubenswrapper[4701]: I1121 19:47:12.117170 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kt8qj"] Nov 21 19:47:12 crc kubenswrapper[4701]: I1121 19:47:12.870009 4701 generic.go:334] "Generic (PLEG): container finished" podID="ab7cd4c1-ae18-4a34-a999-59d1cb54c28f" containerID="c6ed90ecb4f49d313c8b9abba56e4a635b5c6403613764d22bbd1a6708a2f91e" exitCode=0 Nov 21 19:47:12 crc kubenswrapper[4701]: I1121 19:47:12.870137 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kt8qj" event={"ID":"ab7cd4c1-ae18-4a34-a999-59d1cb54c28f","Type":"ContainerDied","Data":"c6ed90ecb4f49d313c8b9abba56e4a635b5c6403613764d22bbd1a6708a2f91e"} Nov 21 19:47:12 crc kubenswrapper[4701]: I1121 19:47:12.870577 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kt8qj" event={"ID":"ab7cd4c1-ae18-4a34-a999-59d1cb54c28f","Type":"ContainerStarted","Data":"bc5429eae391821fd1602fb85fcb438ad5162b80db657051226a107df5ef2388"} Nov 21 19:47:13 crc kubenswrapper[4701]: I1121 19:47:13.883467 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kt8qj" event={"ID":"ab7cd4c1-ae18-4a34-a999-59d1cb54c28f","Type":"ContainerStarted","Data":"05e953564e6107afc151eb5c64f52ef88464a1150f4629fcda5b7d000eaf49a7"} Nov 21 19:47:14 crc kubenswrapper[4701]: I1121 19:47:14.897823 4701 generic.go:334] "Generic (PLEG): container finished" podID="ab7cd4c1-ae18-4a34-a999-59d1cb54c28f" containerID="05e953564e6107afc151eb5c64f52ef88464a1150f4629fcda5b7d000eaf49a7" exitCode=0 Nov 21 19:47:14 crc kubenswrapper[4701]: I1121 19:47:14.897940 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kt8qj" event={"ID":"ab7cd4c1-ae18-4a34-a999-59d1cb54c28f","Type":"ContainerDied","Data":"05e953564e6107afc151eb5c64f52ef88464a1150f4629fcda5b7d000eaf49a7"} Nov 21 19:47:15 crc kubenswrapper[4701]: I1121 19:47:15.919342 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kt8qj" event={"ID":"ab7cd4c1-ae18-4a34-a999-59d1cb54c28f","Type":"ContainerStarted","Data":"83bf3770ae2a73218c5bf3b56cf54794ba1230fcf567c313281dd7a8ca94641c"} Nov 21 19:47:15 crc kubenswrapper[4701]: I1121 19:47:15.942954 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-kt8qj" podStartSLOduration=2.348305166 podStartE2EDuration="4.942932254s" podCreationTimestamp="2025-11-21 19:47:11 +0000 UTC" firstStartedPulling="2025-11-21 19:47:12.873562681 +0000 UTC m=+2723.658702708" lastFinishedPulling="2025-11-21 19:47:15.468189749 +0000 UTC m=+2726.253329796" observedRunningTime="2025-11-21 19:47:15.940399625 +0000 UTC m=+2726.725539672" watchObservedRunningTime="2025-11-21 19:47:15.942932254 +0000 UTC m=+2726.728072281" Nov 21 19:47:18 crc kubenswrapper[4701]: I1121 19:47:18.613829 4701 patch_prober.go:28] interesting 
pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 19:47:18 crc kubenswrapper[4701]: I1121 19:47:18.614328 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 19:47:21 crc kubenswrapper[4701]: I1121 19:47:21.511768 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-kt8qj" Nov 21 19:47:21 crc kubenswrapper[4701]: I1121 19:47:21.513016 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-kt8qj" Nov 21 19:47:21 crc kubenswrapper[4701]: I1121 19:47:21.591079 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-kt8qj" Nov 21 19:47:22 crc kubenswrapper[4701]: I1121 19:47:22.102675 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-kt8qj" Nov 21 19:47:22 crc kubenswrapper[4701]: I1121 19:47:22.170716 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kt8qj"] Nov 21 19:47:24 crc kubenswrapper[4701]: I1121 19:47:24.028267 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-kt8qj" podUID="ab7cd4c1-ae18-4a34-a999-59d1cb54c28f" containerName="registry-server" containerID="cri-o://83bf3770ae2a73218c5bf3b56cf54794ba1230fcf567c313281dd7a8ca94641c" gracePeriod=2 Nov 21 19:47:25 crc kubenswrapper[4701]: I1121 19:47:25.042223 4701 generic.go:334] "Generic (PLEG): container finished" podID="ab7cd4c1-ae18-4a34-a999-59d1cb54c28f" containerID="83bf3770ae2a73218c5bf3b56cf54794ba1230fcf567c313281dd7a8ca94641c" exitCode=0 Nov 21 19:47:25 crc kubenswrapper[4701]: I1121 19:47:25.042333 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kt8qj" event={"ID":"ab7cd4c1-ae18-4a34-a999-59d1cb54c28f","Type":"ContainerDied","Data":"83bf3770ae2a73218c5bf3b56cf54794ba1230fcf567c313281dd7a8ca94641c"} Nov 21 19:47:25 crc kubenswrapper[4701]: I1121 19:47:25.042709 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kt8qj" event={"ID":"ab7cd4c1-ae18-4a34-a999-59d1cb54c28f","Type":"ContainerDied","Data":"bc5429eae391821fd1602fb85fcb438ad5162b80db657051226a107df5ef2388"} Nov 21 19:47:25 crc kubenswrapper[4701]: I1121 19:47:25.042730 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bc5429eae391821fd1602fb85fcb438ad5162b80db657051226a107df5ef2388" Nov 21 19:47:25 crc kubenswrapper[4701]: I1121 19:47:25.062842 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-kt8qj" Nov 21 19:47:25 crc kubenswrapper[4701]: I1121 19:47:25.122864 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab7cd4c1-ae18-4a34-a999-59d1cb54c28f-utilities\") pod \"ab7cd4c1-ae18-4a34-a999-59d1cb54c28f\" (UID: \"ab7cd4c1-ae18-4a34-a999-59d1cb54c28f\") " Nov 21 19:47:25 crc kubenswrapper[4701]: I1121 19:47:25.122957 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p8d7s\" (UniqueName: \"kubernetes.io/projected/ab7cd4c1-ae18-4a34-a999-59d1cb54c28f-kube-api-access-p8d7s\") pod \"ab7cd4c1-ae18-4a34-a999-59d1cb54c28f\" (UID: \"ab7cd4c1-ae18-4a34-a999-59d1cb54c28f\") " Nov 21 19:47:25 crc kubenswrapper[4701]: I1121 19:47:25.123121 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab7cd4c1-ae18-4a34-a999-59d1cb54c28f-catalog-content\") pod \"ab7cd4c1-ae18-4a34-a999-59d1cb54c28f\" (UID: \"ab7cd4c1-ae18-4a34-a999-59d1cb54c28f\") " Nov 21 19:47:25 crc kubenswrapper[4701]: I1121 19:47:25.123973 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ab7cd4c1-ae18-4a34-a999-59d1cb54c28f-utilities" (OuterVolumeSpecName: "utilities") pod "ab7cd4c1-ae18-4a34-a999-59d1cb54c28f" (UID: "ab7cd4c1-ae18-4a34-a999-59d1cb54c28f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:47:25 crc kubenswrapper[4701]: I1121 19:47:25.131258 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab7cd4c1-ae18-4a34-a999-59d1cb54c28f-kube-api-access-p8d7s" (OuterVolumeSpecName: "kube-api-access-p8d7s") pod "ab7cd4c1-ae18-4a34-a999-59d1cb54c28f" (UID: "ab7cd4c1-ae18-4a34-a999-59d1cb54c28f"). InnerVolumeSpecName "kube-api-access-p8d7s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:47:25 crc kubenswrapper[4701]: I1121 19:47:25.195072 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ab7cd4c1-ae18-4a34-a999-59d1cb54c28f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ab7cd4c1-ae18-4a34-a999-59d1cb54c28f" (UID: "ab7cd4c1-ae18-4a34-a999-59d1cb54c28f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:47:25 crc kubenswrapper[4701]: I1121 19:47:25.226351 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab7cd4c1-ae18-4a34-a999-59d1cb54c28f-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 19:47:25 crc kubenswrapper[4701]: I1121 19:47:25.226403 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p8d7s\" (UniqueName: \"kubernetes.io/projected/ab7cd4c1-ae18-4a34-a999-59d1cb54c28f-kube-api-access-p8d7s\") on node \"crc\" DevicePath \"\"" Nov 21 19:47:25 crc kubenswrapper[4701]: I1121 19:47:25.226423 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab7cd4c1-ae18-4a34-a999-59d1cb54c28f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 19:47:26 crc kubenswrapper[4701]: I1121 19:47:26.058264 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-kt8qj" Nov 21 19:47:26 crc kubenswrapper[4701]: I1121 19:47:26.094598 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kt8qj"] Nov 21 19:47:26 crc kubenswrapper[4701]: I1121 19:47:26.105643 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-kt8qj"] Nov 21 19:47:27 crc kubenswrapper[4701]: I1121 19:47:27.972011 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ab7cd4c1-ae18-4a34-a999-59d1cb54c28f" path="/var/lib/kubelet/pods/ab7cd4c1-ae18-4a34-a999-59d1cb54c28f/volumes" Nov 21 19:47:48 crc kubenswrapper[4701]: I1121 19:47:48.614418 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 19:47:48 crc kubenswrapper[4701]: I1121 19:47:48.615591 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 19:48:18 crc kubenswrapper[4701]: I1121 19:48:18.613987 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 19:48:18 crc kubenswrapper[4701]: I1121 19:48:18.614826 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 19:48:18 crc kubenswrapper[4701]: I1121 19:48:18.614899 4701 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" Nov 21 19:48:18 crc kubenswrapper[4701]: I1121 19:48:18.615833 4701 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1da90685f868faf9a72c4d22ec36474fb36df1e592760b3aafb20f1467e23cc0"} pod="openshift-machine-config-operator/machine-config-daemon-tbszf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 19:48:18 crc kubenswrapper[4701]: I1121 19:48:18.615928 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" containerID="cri-o://1da90685f868faf9a72c4d22ec36474fb36df1e592760b3aafb20f1467e23cc0" gracePeriod=600 Nov 21 19:48:18 crc kubenswrapper[4701]: I1121 19:48:18.788794 4701 generic.go:334] "Generic (PLEG): container finished" podID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerID="1da90685f868faf9a72c4d22ec36474fb36df1e592760b3aafb20f1467e23cc0" exitCode=0 Nov 21 19:48:18 crc kubenswrapper[4701]: I1121 19:48:18.788864 4701 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerDied","Data":"1da90685f868faf9a72c4d22ec36474fb36df1e592760b3aafb20f1467e23cc0"} Nov 21 19:48:18 crc kubenswrapper[4701]: I1121 19:48:18.788919 4701 scope.go:117] "RemoveContainer" containerID="9293b5a2479f8a37b25cfffe8c63f12e41d202a81f7c28415703d6e5f527b560" Nov 21 19:48:19 crc kubenswrapper[4701]: I1121 19:48:19.808511 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerStarted","Data":"ea89037636518203b6d07d6774a86e894d096cd957d648cc556a387001e667da"} Nov 21 19:49:48 crc kubenswrapper[4701]: I1121 19:49:48.978652 4701 generic.go:334] "Generic (PLEG): container finished" podID="4f066a0d-46d8-4cfd-b188-495f77c256f1" containerID="5792f9518d2ae6501ef4c7dcc655f763c82a7678164c6ac72ddb21af20d17c77" exitCode=0 Nov 21 19:49:48 crc kubenswrapper[4701]: I1121 19:49:48.978755 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-97f7r" event={"ID":"4f066a0d-46d8-4cfd-b188-495f77c256f1","Type":"ContainerDied","Data":"5792f9518d2ae6501ef4c7dcc655f763c82a7678164c6ac72ddb21af20d17c77"} Nov 21 19:49:50 crc kubenswrapper[4701]: I1121 19:49:50.531381 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-97f7r" Nov 21 19:49:50 crc kubenswrapper[4701]: I1121 19:49:50.617622 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4f066a0d-46d8-4cfd-b188-495f77c256f1-ssh-key\") pod \"4f066a0d-46d8-4cfd-b188-495f77c256f1\" (UID: \"4f066a0d-46d8-4cfd-b188-495f77c256f1\") " Nov 21 19:49:50 crc kubenswrapper[4701]: I1121 19:49:50.617734 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ltldw\" (UniqueName: \"kubernetes.io/projected/4f066a0d-46d8-4cfd-b188-495f77c256f1-kube-api-access-ltldw\") pod \"4f066a0d-46d8-4cfd-b188-495f77c256f1\" (UID: \"4f066a0d-46d8-4cfd-b188-495f77c256f1\") " Nov 21 19:49:50 crc kubenswrapper[4701]: I1121 19:49:50.617845 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4f066a0d-46d8-4cfd-b188-495f77c256f1-inventory\") pod \"4f066a0d-46d8-4cfd-b188-495f77c256f1\" (UID: \"4f066a0d-46d8-4cfd-b188-495f77c256f1\") " Nov 21 19:49:50 crc kubenswrapper[4701]: I1121 19:49:50.617934 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f066a0d-46d8-4cfd-b188-495f77c256f1-telemetry-combined-ca-bundle\") pod \"4f066a0d-46d8-4cfd-b188-495f77c256f1\" (UID: \"4f066a0d-46d8-4cfd-b188-495f77c256f1\") " Nov 21 19:49:50 crc kubenswrapper[4701]: I1121 19:49:50.617968 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/4f066a0d-46d8-4cfd-b188-495f77c256f1-ceilometer-compute-config-data-1\") pod \"4f066a0d-46d8-4cfd-b188-495f77c256f1\" (UID: \"4f066a0d-46d8-4cfd-b188-495f77c256f1\") " Nov 21 19:49:50 crc kubenswrapper[4701]: I1121 19:49:50.618072 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/4f066a0d-46d8-4cfd-b188-495f77c256f1-ceilometer-compute-config-data-0\") pod \"4f066a0d-46d8-4cfd-b188-495f77c256f1\" (UID: \"4f066a0d-46d8-4cfd-b188-495f77c256f1\") " Nov 21 19:49:50 crc kubenswrapper[4701]: I1121 19:49:50.618095 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/4f066a0d-46d8-4cfd-b188-495f77c256f1-ceilometer-compute-config-data-2\") pod \"4f066a0d-46d8-4cfd-b188-495f77c256f1\" (UID: \"4f066a0d-46d8-4cfd-b188-495f77c256f1\") " Nov 21 19:49:50 crc kubenswrapper[4701]: I1121 19:49:50.628886 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f066a0d-46d8-4cfd-b188-495f77c256f1-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "4f066a0d-46d8-4cfd-b188-495f77c256f1" (UID: "4f066a0d-46d8-4cfd-b188-495f77c256f1"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:49:50 crc kubenswrapper[4701]: I1121 19:49:50.628960 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f066a0d-46d8-4cfd-b188-495f77c256f1-kube-api-access-ltldw" (OuterVolumeSpecName: "kube-api-access-ltldw") pod "4f066a0d-46d8-4cfd-b188-495f77c256f1" (UID: "4f066a0d-46d8-4cfd-b188-495f77c256f1"). InnerVolumeSpecName "kube-api-access-ltldw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:49:50 crc kubenswrapper[4701]: I1121 19:49:50.661855 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f066a0d-46d8-4cfd-b188-495f77c256f1-inventory" (OuterVolumeSpecName: "inventory") pod "4f066a0d-46d8-4cfd-b188-495f77c256f1" (UID: "4f066a0d-46d8-4cfd-b188-495f77c256f1"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:49:50 crc kubenswrapper[4701]: I1121 19:49:50.666421 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f066a0d-46d8-4cfd-b188-495f77c256f1-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "4f066a0d-46d8-4cfd-b188-495f77c256f1" (UID: "4f066a0d-46d8-4cfd-b188-495f77c256f1"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:49:50 crc kubenswrapper[4701]: I1121 19:49:50.666831 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f066a0d-46d8-4cfd-b188-495f77c256f1-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "4f066a0d-46d8-4cfd-b188-495f77c256f1" (UID: "4f066a0d-46d8-4cfd-b188-495f77c256f1"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:49:50 crc kubenswrapper[4701]: I1121 19:49:50.678857 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f066a0d-46d8-4cfd-b188-495f77c256f1-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "4f066a0d-46d8-4cfd-b188-495f77c256f1" (UID: "4f066a0d-46d8-4cfd-b188-495f77c256f1"). InnerVolumeSpecName "ceilometer-compute-config-data-1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:49:50 crc kubenswrapper[4701]: I1121 19:49:50.701184 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f066a0d-46d8-4cfd-b188-495f77c256f1-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "4f066a0d-46d8-4cfd-b188-495f77c256f1" (UID: "4f066a0d-46d8-4cfd-b188-495f77c256f1"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:49:50 crc kubenswrapper[4701]: I1121 19:49:50.722140 4701 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4f066a0d-46d8-4cfd-b188-495f77c256f1-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 19:49:50 crc kubenswrapper[4701]: I1121 19:49:50.722229 4701 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f066a0d-46d8-4cfd-b188-495f77c256f1-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:49:50 crc kubenswrapper[4701]: I1121 19:49:50.722251 4701 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/4f066a0d-46d8-4cfd-b188-495f77c256f1-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Nov 21 19:49:50 crc kubenswrapper[4701]: I1121 19:49:50.722273 4701 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/4f066a0d-46d8-4cfd-b188-495f77c256f1-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Nov 21 19:49:50 crc kubenswrapper[4701]: I1121 19:49:50.722301 4701 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/4f066a0d-46d8-4cfd-b188-495f77c256f1-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Nov 21 19:49:50 crc kubenswrapper[4701]: I1121 19:49:50.722322 4701 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4f066a0d-46d8-4cfd-b188-495f77c256f1-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 19:49:50 crc kubenswrapper[4701]: I1121 19:49:50.722340 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ltldw\" (UniqueName: \"kubernetes.io/projected/4f066a0d-46d8-4cfd-b188-495f77c256f1-kube-api-access-ltldw\") on node \"crc\" DevicePath \"\"" Nov 21 19:49:51 crc kubenswrapper[4701]: I1121 19:49:51.011346 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-97f7r" event={"ID":"4f066a0d-46d8-4cfd-b188-495f77c256f1","Type":"ContainerDied","Data":"2f1e2a60b59fa1f94114451e7fb9f9e14cadc972308413c80223773d0d85fb5d"} Nov 21 19:49:51 crc kubenswrapper[4701]: I1121 19:49:51.011423 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2f1e2a60b59fa1f94114451e7fb9f9e14cadc972308413c80223773d0d85fb5d" Nov 21 19:49:51 crc kubenswrapper[4701]: I1121 19:49:51.011490 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-97f7r" Nov 21 19:49:55 crc kubenswrapper[4701]: I1121 19:49:55.040878 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-7lrfk"] Nov 21 19:49:55 crc kubenswrapper[4701]: E1121 19:49:55.042778 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f066a0d-46d8-4cfd-b188-495f77c256f1" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 21 19:49:55 crc kubenswrapper[4701]: I1121 19:49:55.042805 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f066a0d-46d8-4cfd-b188-495f77c256f1" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 21 19:49:55 crc kubenswrapper[4701]: E1121 19:49:55.042846 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab7cd4c1-ae18-4a34-a999-59d1cb54c28f" containerName="extract-content" Nov 21 19:49:55 crc kubenswrapper[4701]: I1121 19:49:55.042857 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab7cd4c1-ae18-4a34-a999-59d1cb54c28f" containerName="extract-content" Nov 21 19:49:55 crc kubenswrapper[4701]: E1121 19:49:55.042878 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab7cd4c1-ae18-4a34-a999-59d1cb54c28f" containerName="extract-utilities" Nov 21 19:49:55 crc kubenswrapper[4701]: I1121 19:49:55.042889 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab7cd4c1-ae18-4a34-a999-59d1cb54c28f" containerName="extract-utilities" Nov 21 19:49:55 crc kubenswrapper[4701]: E1121 19:49:55.042935 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab7cd4c1-ae18-4a34-a999-59d1cb54c28f" containerName="registry-server" Nov 21 19:49:55 crc kubenswrapper[4701]: I1121 19:49:55.042945 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab7cd4c1-ae18-4a34-a999-59d1cb54c28f" containerName="registry-server" Nov 21 19:49:55 crc kubenswrapper[4701]: I1121 19:49:55.043287 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f066a0d-46d8-4cfd-b188-495f77c256f1" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 21 19:49:55 crc kubenswrapper[4701]: I1121 19:49:55.043340 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab7cd4c1-ae18-4a34-a999-59d1cb54c28f" containerName="registry-server" Nov 21 19:49:55 crc kubenswrapper[4701]: I1121 19:49:55.046003 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7lrfk" Nov 21 19:49:55 crc kubenswrapper[4701]: I1121 19:49:55.068521 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9112ce0f-ffb2-4a5f-b354-d04251a94680-utilities\") pod \"redhat-marketplace-7lrfk\" (UID: \"9112ce0f-ffb2-4a5f-b354-d04251a94680\") " pod="openshift-marketplace/redhat-marketplace-7lrfk" Nov 21 19:49:55 crc kubenswrapper[4701]: I1121 19:49:55.068614 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9112ce0f-ffb2-4a5f-b354-d04251a94680-catalog-content\") pod \"redhat-marketplace-7lrfk\" (UID: \"9112ce0f-ffb2-4a5f-b354-d04251a94680\") " pod="openshift-marketplace/redhat-marketplace-7lrfk" Nov 21 19:49:55 crc kubenswrapper[4701]: I1121 19:49:55.068868 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-587vr\" (UniqueName: \"kubernetes.io/projected/9112ce0f-ffb2-4a5f-b354-d04251a94680-kube-api-access-587vr\") pod \"redhat-marketplace-7lrfk\" (UID: \"9112ce0f-ffb2-4a5f-b354-d04251a94680\") " pod="openshift-marketplace/redhat-marketplace-7lrfk" Nov 21 19:49:55 crc kubenswrapper[4701]: I1121 19:49:55.075154 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7lrfk"] Nov 21 19:49:55 crc kubenswrapper[4701]: I1121 19:49:55.171544 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-587vr\" (UniqueName: \"kubernetes.io/projected/9112ce0f-ffb2-4a5f-b354-d04251a94680-kube-api-access-587vr\") pod \"redhat-marketplace-7lrfk\" (UID: \"9112ce0f-ffb2-4a5f-b354-d04251a94680\") " pod="openshift-marketplace/redhat-marketplace-7lrfk" Nov 21 19:49:55 crc kubenswrapper[4701]: I1121 19:49:55.171705 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9112ce0f-ffb2-4a5f-b354-d04251a94680-utilities\") pod \"redhat-marketplace-7lrfk\" (UID: \"9112ce0f-ffb2-4a5f-b354-d04251a94680\") " pod="openshift-marketplace/redhat-marketplace-7lrfk" Nov 21 19:49:55 crc kubenswrapper[4701]: I1121 19:49:55.171759 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9112ce0f-ffb2-4a5f-b354-d04251a94680-catalog-content\") pod \"redhat-marketplace-7lrfk\" (UID: \"9112ce0f-ffb2-4a5f-b354-d04251a94680\") " pod="openshift-marketplace/redhat-marketplace-7lrfk" Nov 21 19:49:55 crc kubenswrapper[4701]: I1121 19:49:55.172650 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9112ce0f-ffb2-4a5f-b354-d04251a94680-catalog-content\") pod \"redhat-marketplace-7lrfk\" (UID: \"9112ce0f-ffb2-4a5f-b354-d04251a94680\") " pod="openshift-marketplace/redhat-marketplace-7lrfk" Nov 21 19:49:55 crc kubenswrapper[4701]: I1121 19:49:55.172792 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9112ce0f-ffb2-4a5f-b354-d04251a94680-utilities\") pod \"redhat-marketplace-7lrfk\" (UID: \"9112ce0f-ffb2-4a5f-b354-d04251a94680\") " pod="openshift-marketplace/redhat-marketplace-7lrfk" Nov 21 19:49:55 crc kubenswrapper[4701]: I1121 19:49:55.197109 4701 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-587vr\" (UniqueName: \"kubernetes.io/projected/9112ce0f-ffb2-4a5f-b354-d04251a94680-kube-api-access-587vr\") pod \"redhat-marketplace-7lrfk\" (UID: \"9112ce0f-ffb2-4a5f-b354-d04251a94680\") " pod="openshift-marketplace/redhat-marketplace-7lrfk" Nov 21 19:49:55 crc kubenswrapper[4701]: I1121 19:49:55.422602 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7lrfk" Nov 21 19:49:55 crc kubenswrapper[4701]: I1121 19:49:55.920423 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7lrfk"] Nov 21 19:49:56 crc kubenswrapper[4701]: I1121 19:49:56.087176 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7lrfk" event={"ID":"9112ce0f-ffb2-4a5f-b354-d04251a94680","Type":"ContainerStarted","Data":"515bbea44e7f4c17234b959ba8e128a2ef6de94c67181c015569372fee61ab62"} Nov 21 19:49:57 crc kubenswrapper[4701]: I1121 19:49:57.103355 4701 generic.go:334] "Generic (PLEG): container finished" podID="9112ce0f-ffb2-4a5f-b354-d04251a94680" containerID="14da5bf4ec94ea00f3b59cc2bd11559c0d4b9861baf4dee45e4a191df2ca40ab" exitCode=0 Nov 21 19:49:57 crc kubenswrapper[4701]: I1121 19:49:57.103471 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7lrfk" event={"ID":"9112ce0f-ffb2-4a5f-b354-d04251a94680","Type":"ContainerDied","Data":"14da5bf4ec94ea00f3b59cc2bd11559c0d4b9861baf4dee45e4a191df2ca40ab"} Nov 21 19:49:57 crc kubenswrapper[4701]: I1121 19:49:57.108783 4701 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 19:49:59 crc kubenswrapper[4701]: I1121 19:49:59.135528 4701 generic.go:334] "Generic (PLEG): container finished" podID="9112ce0f-ffb2-4a5f-b354-d04251a94680" containerID="e47540d58fbdb4fbbc483e279470ad37dc5415becbf93b5ed21c657a9b2f0f58" exitCode=0 Nov 21 19:49:59 crc kubenswrapper[4701]: I1121 19:49:59.135596 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7lrfk" event={"ID":"9112ce0f-ffb2-4a5f-b354-d04251a94680","Type":"ContainerDied","Data":"e47540d58fbdb4fbbc483e279470ad37dc5415becbf93b5ed21c657a9b2f0f58"} Nov 21 19:50:00 crc kubenswrapper[4701]: I1121 19:50:00.158937 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7lrfk" event={"ID":"9112ce0f-ffb2-4a5f-b354-d04251a94680","Type":"ContainerStarted","Data":"c09e1faee803cf070d1e223aad8fea37162fbb784374dd9d76dff637bd62aa5a"} Nov 21 19:50:00 crc kubenswrapper[4701]: I1121 19:50:00.197486 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-7lrfk" podStartSLOduration=2.739275001 podStartE2EDuration="5.197416512s" podCreationTimestamp="2025-11-21 19:49:55 +0000 UTC" firstStartedPulling="2025-11-21 19:49:57.108367013 +0000 UTC m=+2887.893507060" lastFinishedPulling="2025-11-21 19:49:59.566508504 +0000 UTC m=+2890.351648571" observedRunningTime="2025-11-21 19:50:00.18998571 +0000 UTC m=+2890.975125767" watchObservedRunningTime="2025-11-21 19:50:00.197416512 +0000 UTC m=+2890.982556579" Nov 21 19:50:05 crc kubenswrapper[4701]: I1121 19:50:05.423503 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-7lrfk" Nov 21 19:50:05 crc kubenswrapper[4701]: I1121 19:50:05.424496 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/redhat-marketplace-7lrfk" Nov 21 19:50:05 crc kubenswrapper[4701]: I1121 19:50:05.496541 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-7lrfk" Nov 21 19:50:06 crc kubenswrapper[4701]: I1121 19:50:06.340156 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-7lrfk" Nov 21 19:50:06 crc kubenswrapper[4701]: I1121 19:50:06.448300 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7lrfk"] Nov 21 19:50:08 crc kubenswrapper[4701]: I1121 19:50:08.289077 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-7lrfk" podUID="9112ce0f-ffb2-4a5f-b354-d04251a94680" containerName="registry-server" containerID="cri-o://c09e1faee803cf070d1e223aad8fea37162fbb784374dd9d76dff637bd62aa5a" gracePeriod=2 Nov 21 19:50:08 crc kubenswrapper[4701]: E1121 19:50:08.484352 4701 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9112ce0f_ffb2_4a5f_b354_d04251a94680.slice/crio-c09e1faee803cf070d1e223aad8fea37162fbb784374dd9d76dff637bd62aa5a.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9112ce0f_ffb2_4a5f_b354_d04251a94680.slice/crio-conmon-c09e1faee803cf070d1e223aad8fea37162fbb784374dd9d76dff637bd62aa5a.scope\": RecentStats: unable to find data in memory cache]" Nov 21 19:50:08 crc kubenswrapper[4701]: I1121 19:50:08.836481 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7lrfk" Nov 21 19:50:08 crc kubenswrapper[4701]: I1121 19:50:08.877135 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9112ce0f-ffb2-4a5f-b354-d04251a94680-catalog-content\") pod \"9112ce0f-ffb2-4a5f-b354-d04251a94680\" (UID: \"9112ce0f-ffb2-4a5f-b354-d04251a94680\") " Nov 21 19:50:08 crc kubenswrapper[4701]: I1121 19:50:08.877313 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-587vr\" (UniqueName: \"kubernetes.io/projected/9112ce0f-ffb2-4a5f-b354-d04251a94680-kube-api-access-587vr\") pod \"9112ce0f-ffb2-4a5f-b354-d04251a94680\" (UID: \"9112ce0f-ffb2-4a5f-b354-d04251a94680\") " Nov 21 19:50:08 crc kubenswrapper[4701]: I1121 19:50:08.877581 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9112ce0f-ffb2-4a5f-b354-d04251a94680-utilities\") pod \"9112ce0f-ffb2-4a5f-b354-d04251a94680\" (UID: \"9112ce0f-ffb2-4a5f-b354-d04251a94680\") " Nov 21 19:50:08 crc kubenswrapper[4701]: I1121 19:50:08.878692 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9112ce0f-ffb2-4a5f-b354-d04251a94680-utilities" (OuterVolumeSpecName: "utilities") pod "9112ce0f-ffb2-4a5f-b354-d04251a94680" (UID: "9112ce0f-ffb2-4a5f-b354-d04251a94680"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:50:08 crc kubenswrapper[4701]: I1121 19:50:08.879006 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9112ce0f-ffb2-4a5f-b354-d04251a94680-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 19:50:08 crc kubenswrapper[4701]: I1121 19:50:08.889782 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9112ce0f-ffb2-4a5f-b354-d04251a94680-kube-api-access-587vr" (OuterVolumeSpecName: "kube-api-access-587vr") pod "9112ce0f-ffb2-4a5f-b354-d04251a94680" (UID: "9112ce0f-ffb2-4a5f-b354-d04251a94680"). InnerVolumeSpecName "kube-api-access-587vr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:50:08 crc kubenswrapper[4701]: I1121 19:50:08.901193 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9112ce0f-ffb2-4a5f-b354-d04251a94680-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9112ce0f-ffb2-4a5f-b354-d04251a94680" (UID: "9112ce0f-ffb2-4a5f-b354-d04251a94680"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:50:08 crc kubenswrapper[4701]: I1121 19:50:08.983660 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9112ce0f-ffb2-4a5f-b354-d04251a94680-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 19:50:08 crc kubenswrapper[4701]: I1121 19:50:08.983767 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-587vr\" (UniqueName: \"kubernetes.io/projected/9112ce0f-ffb2-4a5f-b354-d04251a94680-kube-api-access-587vr\") on node \"crc\" DevicePath \"\"" Nov 21 19:50:09 crc kubenswrapper[4701]: I1121 19:50:09.303960 4701 generic.go:334] "Generic (PLEG): container finished" podID="9112ce0f-ffb2-4a5f-b354-d04251a94680" containerID="c09e1faee803cf070d1e223aad8fea37162fbb784374dd9d76dff637bd62aa5a" exitCode=0 Nov 21 19:50:09 crc kubenswrapper[4701]: I1121 19:50:09.304019 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7lrfk" event={"ID":"9112ce0f-ffb2-4a5f-b354-d04251a94680","Type":"ContainerDied","Data":"c09e1faee803cf070d1e223aad8fea37162fbb784374dd9d76dff637bd62aa5a"} Nov 21 19:50:09 crc kubenswrapper[4701]: I1121 19:50:09.304065 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7lrfk" event={"ID":"9112ce0f-ffb2-4a5f-b354-d04251a94680","Type":"ContainerDied","Data":"515bbea44e7f4c17234b959ba8e128a2ef6de94c67181c015569372fee61ab62"} Nov 21 19:50:09 crc kubenswrapper[4701]: I1121 19:50:09.304090 4701 scope.go:117] "RemoveContainer" containerID="c09e1faee803cf070d1e223aad8fea37162fbb784374dd9d76dff637bd62aa5a" Nov 21 19:50:09 crc kubenswrapper[4701]: I1121 19:50:09.306514 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7lrfk" Nov 21 19:50:09 crc kubenswrapper[4701]: I1121 19:50:09.341600 4701 scope.go:117] "RemoveContainer" containerID="e47540d58fbdb4fbbc483e279470ad37dc5415becbf93b5ed21c657a9b2f0f58" Nov 21 19:50:09 crc kubenswrapper[4701]: I1121 19:50:09.356326 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7lrfk"] Nov 21 19:50:09 crc kubenswrapper[4701]: I1121 19:50:09.370009 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-7lrfk"] Nov 21 19:50:09 crc kubenswrapper[4701]: I1121 19:50:09.396701 4701 scope.go:117] "RemoveContainer" containerID="14da5bf4ec94ea00f3b59cc2bd11559c0d4b9861baf4dee45e4a191df2ca40ab" Nov 21 19:50:09 crc kubenswrapper[4701]: I1121 19:50:09.461488 4701 scope.go:117] "RemoveContainer" containerID="c09e1faee803cf070d1e223aad8fea37162fbb784374dd9d76dff637bd62aa5a" Nov 21 19:50:09 crc kubenswrapper[4701]: E1121 19:50:09.477122 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c09e1faee803cf070d1e223aad8fea37162fbb784374dd9d76dff637bd62aa5a\": container with ID starting with c09e1faee803cf070d1e223aad8fea37162fbb784374dd9d76dff637bd62aa5a not found: ID does not exist" containerID="c09e1faee803cf070d1e223aad8fea37162fbb784374dd9d76dff637bd62aa5a" Nov 21 19:50:09 crc kubenswrapper[4701]: I1121 19:50:09.477190 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c09e1faee803cf070d1e223aad8fea37162fbb784374dd9d76dff637bd62aa5a"} err="failed to get container status \"c09e1faee803cf070d1e223aad8fea37162fbb784374dd9d76dff637bd62aa5a\": rpc error: code = NotFound desc = could not find container \"c09e1faee803cf070d1e223aad8fea37162fbb784374dd9d76dff637bd62aa5a\": container with ID starting with c09e1faee803cf070d1e223aad8fea37162fbb784374dd9d76dff637bd62aa5a not found: ID does not exist" Nov 21 19:50:09 crc kubenswrapper[4701]: I1121 19:50:09.477244 4701 scope.go:117] "RemoveContainer" containerID="e47540d58fbdb4fbbc483e279470ad37dc5415becbf93b5ed21c657a9b2f0f58" Nov 21 19:50:09 crc kubenswrapper[4701]: E1121 19:50:09.478053 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e47540d58fbdb4fbbc483e279470ad37dc5415becbf93b5ed21c657a9b2f0f58\": container with ID starting with e47540d58fbdb4fbbc483e279470ad37dc5415becbf93b5ed21c657a9b2f0f58 not found: ID does not exist" containerID="e47540d58fbdb4fbbc483e279470ad37dc5415becbf93b5ed21c657a9b2f0f58" Nov 21 19:50:09 crc kubenswrapper[4701]: I1121 19:50:09.478089 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e47540d58fbdb4fbbc483e279470ad37dc5415becbf93b5ed21c657a9b2f0f58"} err="failed to get container status \"e47540d58fbdb4fbbc483e279470ad37dc5415becbf93b5ed21c657a9b2f0f58\": rpc error: code = NotFound desc = could not find container \"e47540d58fbdb4fbbc483e279470ad37dc5415becbf93b5ed21c657a9b2f0f58\": container with ID starting with e47540d58fbdb4fbbc483e279470ad37dc5415becbf93b5ed21c657a9b2f0f58 not found: ID does not exist" Nov 21 19:50:09 crc kubenswrapper[4701]: I1121 19:50:09.478109 4701 scope.go:117] "RemoveContainer" containerID="14da5bf4ec94ea00f3b59cc2bd11559c0d4b9861baf4dee45e4a191df2ca40ab" Nov 21 19:50:09 crc kubenswrapper[4701]: E1121 19:50:09.478811 4701 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"14da5bf4ec94ea00f3b59cc2bd11559c0d4b9861baf4dee45e4a191df2ca40ab\": container with ID starting with 14da5bf4ec94ea00f3b59cc2bd11559c0d4b9861baf4dee45e4a191df2ca40ab not found: ID does not exist" containerID="14da5bf4ec94ea00f3b59cc2bd11559c0d4b9861baf4dee45e4a191df2ca40ab" Nov 21 19:50:09 crc kubenswrapper[4701]: I1121 19:50:09.478842 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"14da5bf4ec94ea00f3b59cc2bd11559c0d4b9861baf4dee45e4a191df2ca40ab"} err="failed to get container status \"14da5bf4ec94ea00f3b59cc2bd11559c0d4b9861baf4dee45e4a191df2ca40ab\": rpc error: code = NotFound desc = could not find container \"14da5bf4ec94ea00f3b59cc2bd11559c0d4b9861baf4dee45e4a191df2ca40ab\": container with ID starting with 14da5bf4ec94ea00f3b59cc2bd11559c0d4b9861baf4dee45e4a191df2ca40ab not found: ID does not exist" Nov 21 19:50:09 crc kubenswrapper[4701]: I1121 19:50:09.970518 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9112ce0f-ffb2-4a5f-b354-d04251a94680" path="/var/lib/kubelet/pods/9112ce0f-ffb2-4a5f-b354-d04251a94680/volumes" Nov 21 19:50:18 crc kubenswrapper[4701]: I1121 19:50:18.613184 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 19:50:18 crc kubenswrapper[4701]: I1121 19:50:18.614072 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.394095 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-backup-0"] Nov 21 19:50:28 crc kubenswrapper[4701]: E1121 19:50:28.395638 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9112ce0f-ffb2-4a5f-b354-d04251a94680" containerName="extract-content" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.395660 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="9112ce0f-ffb2-4a5f-b354-d04251a94680" containerName="extract-content" Nov 21 19:50:28 crc kubenswrapper[4701]: E1121 19:50:28.395694 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9112ce0f-ffb2-4a5f-b354-d04251a94680" containerName="extract-utilities" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.395701 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="9112ce0f-ffb2-4a5f-b354-d04251a94680" containerName="extract-utilities" Nov 21 19:50:28 crc kubenswrapper[4701]: E1121 19:50:28.395725 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9112ce0f-ffb2-4a5f-b354-d04251a94680" containerName="registry-server" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.395733 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="9112ce0f-ffb2-4a5f-b354-d04251a94680" containerName="registry-server" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.396030 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="9112ce0f-ffb2-4a5f-b354-d04251a94680" containerName="registry-server" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.397755 4701 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.403988 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-backup-config-data" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.423488 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"] Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.500141 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c177319-2b64-45dd-a3df-47ccaca79da4-config-data\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.500389 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45qhm\" (UniqueName: \"kubernetes.io/projected/9c177319-2b64-45dd-a3df-47ccaca79da4-kube-api-access-45qhm\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.500460 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/9c177319-2b64-45dd-a3df-47ccaca79da4-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.500550 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/9c177319-2b64-45dd-a3df-47ccaca79da4-lib-modules\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.500582 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/9c177319-2b64-45dd-a3df-47ccaca79da4-dev\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.500693 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/9c177319-2b64-45dd-a3df-47ccaca79da4-etc-nvme\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.500721 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/9c177319-2b64-45dd-a3df-47ccaca79da4-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.500799 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9c177319-2b64-45dd-a3df-47ccaca79da4-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.500857 4701 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c177319-2b64-45dd-a3df-47ccaca79da4-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.500918 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9c177319-2b64-45dd-a3df-47ccaca79da4-scripts\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.500995 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/9c177319-2b64-45dd-a3df-47ccaca79da4-run\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.501132 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/9c177319-2b64-45dd-a3df-47ccaca79da4-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.501193 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/9c177319-2b64-45dd-a3df-47ccaca79da4-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.501285 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/9c177319-2b64-45dd-a3df-47ccaca79da4-sys\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.501373 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9c177319-2b64-45dd-a3df-47ccaca79da4-config-data-custom\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.507218 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-volume-nfs-2-0"] Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.509739 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.514771 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-volume-nfs-2-config-data" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.531935 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-nfs-2-0"] Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.591679 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-volume-nfs-0"] Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.594153 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.597511 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-volume-nfs-config-data" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.605905 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-combined-ca-bundle\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.605952 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-sys\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.605974 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-run\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.605999 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9c177319-2b64-45dd-a3df-47ccaca79da4-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.606020 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c177319-2b64-45dd-a3df-47ccaca79da4-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.606052 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-etc-nvme\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.606074 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9c177319-2b64-45dd-a3df-47ccaca79da4-scripts\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.606111 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/9c177319-2b64-45dd-a3df-47ccaca79da4-run\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.606137 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-var-locks-cinder\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " 
pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.606175 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/9c177319-2b64-45dd-a3df-47ccaca79da4-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.606211 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/9c177319-2b64-45dd-a3df-47ccaca79da4-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.606232 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-var-lib-cinder\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.606252 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-var-locks-brick\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.606279 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-scripts\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.606295 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/9c177319-2b64-45dd-a3df-47ccaca79da4-sys\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.606322 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-etc-iscsi\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.607242 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/9c177319-2b64-45dd-a3df-47ccaca79da4-run\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.607425 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9c177319-2b64-45dd-a3df-47ccaca79da4-config-data-custom\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.607464 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: 
\"kubernetes.io/host-path/9c177319-2b64-45dd-a3df-47ccaca79da4-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.607483 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-config-data-custom\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.607635 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/9c177319-2b64-45dd-a3df-47ccaca79da4-sys\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.607625 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9c177319-2b64-45dd-a3df-47ccaca79da4-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.607750 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c177319-2b64-45dd-a3df-47ccaca79da4-config-data\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.607798 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-etc-machine-id\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.607811 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/9c177319-2b64-45dd-a3df-47ccaca79da4-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.607845 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-config-data\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.608060 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-nfs-0"] Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.608426 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-45qhm\" (UniqueName: \"kubernetes.io/projected/9c177319-2b64-45dd-a3df-47ccaca79da4-kube-api-access-45qhm\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.608468 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/9c177319-2b64-45dd-a3df-47ccaca79da4-var-locks-cinder\") pod 
\"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.608520 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-dev\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.608615 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/9c177319-2b64-45dd-a3df-47ccaca79da4-lib-modules\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.608642 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/9c177319-2b64-45dd-a3df-47ccaca79da4-dev\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.608694 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qkmbk\" (UniqueName: \"kubernetes.io/projected/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-kube-api-access-qkmbk\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.608727 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-lib-modules\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.608774 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/9c177319-2b64-45dd-a3df-47ccaca79da4-etc-nvme\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.608797 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/9c177319-2b64-45dd-a3df-47ccaca79da4-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.609344 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/9c177319-2b64-45dd-a3df-47ccaca79da4-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.609782 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/9c177319-2b64-45dd-a3df-47ccaca79da4-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.609829 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" 
(UniqueName: \"kubernetes.io/host-path/9c177319-2b64-45dd-a3df-47ccaca79da4-lib-modules\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.609852 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/9c177319-2b64-45dd-a3df-47ccaca79da4-dev\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.616318 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9c177319-2b64-45dd-a3df-47ccaca79da4-scripts\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.616966 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c177319-2b64-45dd-a3df-47ccaca79da4-config-data\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.619454 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9c177319-2b64-45dd-a3df-47ccaca79da4-config-data-custom\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.622858 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c177319-2b64-45dd-a3df-47ccaca79da4-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.627366 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/9c177319-2b64-45dd-a3df-47ccaca79da4-etc-nvme\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.634470 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-45qhm\" (UniqueName: \"kubernetes.io/projected/9c177319-2b64-45dd-a3df-47ccaca79da4-kube-api-access-45qhm\") pod \"cinder-backup-0\" (UID: \"9c177319-2b64-45dd-a3df-47ccaca79da4\") " pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.710229 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-etc-nvme\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.710278 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/fbd75a47-d6fb-407b-bf7a-9750b745b820-etc-iscsi\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.710311 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-4kx87\" (UniqueName: \"kubernetes.io/projected/fbd75a47-d6fb-407b-bf7a-9750b745b820-kube-api-access-4kx87\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.710332 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/fbd75a47-d6fb-407b-bf7a-9750b745b820-sys\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.710359 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-var-locks-cinder\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.710447 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-etc-nvme\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.710532 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-var-locks-cinder\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.710611 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/fbd75a47-d6fb-407b-bf7a-9750b745b820-dev\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.710699 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/fbd75a47-d6fb-407b-bf7a-9750b745b820-var-locks-cinder\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.710808 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/fbd75a47-d6fb-407b-bf7a-9750b745b820-etc-machine-id\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.710867 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-var-lib-cinder\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.710930 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-var-locks-brick\") pod \"cinder-volume-nfs-2-0\" (UID: 
\"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.710955 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fbd75a47-d6fb-407b-bf7a-9750b745b820-config-data\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.711027 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-scripts\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.711075 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/fbd75a47-d6fb-407b-bf7a-9750b745b820-lib-modules\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.711104 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/fbd75a47-d6fb-407b-bf7a-9750b745b820-etc-nvme\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.711159 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-etc-iscsi\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.711192 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fbd75a47-d6fb-407b-bf7a-9750b745b820-scripts\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.711242 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/fbd75a47-d6fb-407b-bf7a-9750b745b820-var-locks-brick\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.711290 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/fbd75a47-d6fb-407b-bf7a-9750b745b820-var-lib-cinder\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.711326 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/fbd75a47-d6fb-407b-bf7a-9750b745b820-run\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.711382 4701 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-config-data-custom\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.711440 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-etc-machine-id\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.711467 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-config-data\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.711543 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-dev\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.711602 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fbd75a47-d6fb-407b-bf7a-9750b745b820-config-data-custom\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.711671 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qkmbk\" (UniqueName: \"kubernetes.io/projected/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-kube-api-access-qkmbk\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.711699 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-lib-modules\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.711786 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fbd75a47-d6fb-407b-bf7a-9750b745b820-combined-ca-bundle\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.711816 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-combined-ca-bundle\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.711853 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: 
\"kubernetes.io/host-path/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-sys\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.711873 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-run\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.712029 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-run\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.712083 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-var-lib-cinder\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.712122 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-var-locks-brick\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.712738 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-etc-iscsi\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.713113 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-etc-machine-id\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.713364 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-lib-modules\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.713583 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-sys\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.713736 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-dev\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.717536 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-scripts\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.718339 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-config-data\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.720377 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-config-data-custom\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.722063 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-combined-ca-bundle\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.723790 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-backup-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.744848 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qkmbk\" (UniqueName: \"kubernetes.io/projected/c2b29617-ccfa-4b0c-be12-52e9e7e06c33-kube-api-access-qkmbk\") pod \"cinder-volume-nfs-2-0\" (UID: \"c2b29617-ccfa-4b0c-be12-52e9e7e06c33\") " pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.814067 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/fbd75a47-d6fb-407b-bf7a-9750b745b820-var-locks-cinder\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.814148 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/fbd75a47-d6fb-407b-bf7a-9750b745b820-etc-machine-id\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.814172 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fbd75a47-d6fb-407b-bf7a-9750b745b820-config-data\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.814231 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/fbd75a47-d6fb-407b-bf7a-9750b745b820-var-locks-cinder\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.814315 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/fbd75a47-d6fb-407b-bf7a-9750b745b820-etc-machine-id\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.814320 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/fbd75a47-d6fb-407b-bf7a-9750b745b820-lib-modules\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.814250 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/fbd75a47-d6fb-407b-bf7a-9750b745b820-lib-modules\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.814420 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/fbd75a47-d6fb-407b-bf7a-9750b745b820-etc-nvme\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.814550 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fbd75a47-d6fb-407b-bf7a-9750b745b820-scripts\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.814600 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/fbd75a47-d6fb-407b-bf7a-9750b745b820-var-locks-brick\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.814680 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/fbd75a47-d6fb-407b-bf7a-9750b745b820-var-lib-cinder\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.814727 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/fbd75a47-d6fb-407b-bf7a-9750b745b820-run\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.814978 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fbd75a47-d6fb-407b-bf7a-9750b745b820-config-data-custom\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.815147 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fbd75a47-d6fb-407b-bf7a-9750b745b820-combined-ca-bundle\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.815342 4701 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/fbd75a47-d6fb-407b-bf7a-9750b745b820-etc-iscsi\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.815351 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/fbd75a47-d6fb-407b-bf7a-9750b745b820-var-locks-brick\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.815444 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4kx87\" (UniqueName: \"kubernetes.io/projected/fbd75a47-d6fb-407b-bf7a-9750b745b820-kube-api-access-4kx87\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.815482 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/fbd75a47-d6fb-407b-bf7a-9750b745b820-sys\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.815478 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/fbd75a47-d6fb-407b-bf7a-9750b745b820-etc-nvme\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.815520 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/fbd75a47-d6fb-407b-bf7a-9750b745b820-sys\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.815542 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/fbd75a47-d6fb-407b-bf7a-9750b745b820-dev\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.815566 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/fbd75a47-d6fb-407b-bf7a-9750b745b820-var-lib-cinder\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.815637 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/fbd75a47-d6fb-407b-bf7a-9750b745b820-etc-iscsi\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.815894 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/fbd75a47-d6fb-407b-bf7a-9750b745b820-run\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.815926 
4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/fbd75a47-d6fb-407b-bf7a-9750b745b820-dev\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.820577 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fbd75a47-d6fb-407b-bf7a-9750b745b820-config-data-custom\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.821901 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fbd75a47-d6fb-407b-bf7a-9750b745b820-combined-ca-bundle\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.823221 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fbd75a47-d6fb-407b-bf7a-9750b745b820-scripts\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.828245 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fbd75a47-d6fb-407b-bf7a-9750b745b820-config-data\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.833909 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:28 crc kubenswrapper[4701]: I1121 19:50:28.840252 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4kx87\" (UniqueName: \"kubernetes.io/projected/fbd75a47-d6fb-407b-bf7a-9750b745b820-kube-api-access-4kx87\") pod \"cinder-volume-nfs-0\" (UID: \"fbd75a47-d6fb-407b-bf7a-9750b745b820\") " pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:29 crc kubenswrapper[4701]: I1121 19:50:29.004311 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:29 crc kubenswrapper[4701]: I1121 19:50:29.311856 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"] Nov 21 19:50:29 crc kubenswrapper[4701]: I1121 19:50:29.463744 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-nfs-2-0"] Nov 21 19:50:29 crc kubenswrapper[4701]: W1121 19:50:29.469556 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc2b29617_ccfa_4b0c_be12_52e9e7e06c33.slice/crio-16bb4b90270fda9547788c52e190e64b2d5fb085a3eb428e878067808f070f96 WatchSource:0}: Error finding container 16bb4b90270fda9547788c52e190e64b2d5fb085a3eb428e878067808f070f96: Status 404 returned error can't find the container with id 16bb4b90270fda9547788c52e190e64b2d5fb085a3eb428e878067808f070f96 Nov 21 19:50:29 crc kubenswrapper[4701]: I1121 19:50:29.609870 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"9c177319-2b64-45dd-a3df-47ccaca79da4","Type":"ContainerStarted","Data":"9f6261fbc4e808c17d63535ff80e06b5b726a069aa2d2853ff9e0f00e9cd0a94"} Nov 21 19:50:29 crc kubenswrapper[4701]: I1121 19:50:29.624588 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-nfs-2-0" event={"ID":"c2b29617-ccfa-4b0c-be12-52e9e7e06c33","Type":"ContainerStarted","Data":"16bb4b90270fda9547788c52e190e64b2d5fb085a3eb428e878067808f070f96"} Nov 21 19:50:29 crc kubenswrapper[4701]: I1121 19:50:29.709028 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-nfs-0"] Nov 21 19:50:30 crc kubenswrapper[4701]: I1121 19:50:30.660907 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"9c177319-2b64-45dd-a3df-47ccaca79da4","Type":"ContainerStarted","Data":"94ebd71194392b0410d8286207e7c68adf236bbb9184c203dac192fdfc8267d7"} Nov 21 19:50:30 crc kubenswrapper[4701]: I1121 19:50:30.666073 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"9c177319-2b64-45dd-a3df-47ccaca79da4","Type":"ContainerStarted","Data":"c5f9f5ce649ab521a567041ebb4131858b80e59cad1b5fe865cad709270730ae"} Nov 21 19:50:30 crc kubenswrapper[4701]: I1121 19:50:30.674151 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-nfs-2-0" event={"ID":"c2b29617-ccfa-4b0c-be12-52e9e7e06c33","Type":"ContainerStarted","Data":"a65b406740756b5032540f207b4f345a73d44da48950647a0b1d03926a187d5d"} Nov 21 19:50:30 crc kubenswrapper[4701]: I1121 19:50:30.674240 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-nfs-2-0" event={"ID":"c2b29617-ccfa-4b0c-be12-52e9e7e06c33","Type":"ContainerStarted","Data":"064ed6a3cfc704f9299a7e892d73fcc39e67a30c1fb41431d3934311626d5df6"} Nov 21 19:50:30 crc kubenswrapper[4701]: I1121 19:50:30.686212 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-nfs-0" event={"ID":"fbd75a47-d6fb-407b-bf7a-9750b745b820","Type":"ContainerStarted","Data":"1443a2a3e1864ecab1a4c3360fe9ef88a96f85954b61b20f0892b0e93b2528b6"} Nov 21 19:50:30 crc kubenswrapper[4701]: I1121 19:50:30.686292 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-nfs-0" event={"ID":"fbd75a47-d6fb-407b-bf7a-9750b745b820","Type":"ContainerStarted","Data":"23fb877c5a6b6bf99d8c51d839c3e0e61d9b2815f5a13f4b5faa7b208fa06e2f"} Nov 21 19:50:30 crc kubenswrapper[4701]: I1121 
19:50:30.686304 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-nfs-0" event={"ID":"fbd75a47-d6fb-407b-bf7a-9750b745b820","Type":"ContainerStarted","Data":"79db729c517d399cd356a63754ca0bb60e8ce69a8dfc8c474bc14e70ec661fe4"} Nov 21 19:50:30 crc kubenswrapper[4701]: I1121 19:50:30.696423 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-backup-0" podStartSLOduration=2.511239455 podStartE2EDuration="2.696402544s" podCreationTimestamp="2025-11-21 19:50:28 +0000 UTC" firstStartedPulling="2025-11-21 19:50:29.317401076 +0000 UTC m=+2920.102541103" lastFinishedPulling="2025-11-21 19:50:29.502564155 +0000 UTC m=+2920.287704192" observedRunningTime="2025-11-21 19:50:30.691723036 +0000 UTC m=+2921.476863093" watchObservedRunningTime="2025-11-21 19:50:30.696402544 +0000 UTC m=+2921.481542571" Nov 21 19:50:30 crc kubenswrapper[4701]: I1121 19:50:30.736326 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-volume-nfs-2-0" podStartSLOduration=2.404060602 podStartE2EDuration="2.736302797s" podCreationTimestamp="2025-11-21 19:50:28 +0000 UTC" firstStartedPulling="2025-11-21 19:50:29.473073904 +0000 UTC m=+2920.258213941" lastFinishedPulling="2025-11-21 19:50:29.805316109 +0000 UTC m=+2920.590456136" observedRunningTime="2025-11-21 19:50:30.727632822 +0000 UTC m=+2921.512772849" watchObservedRunningTime="2025-11-21 19:50:30.736302797 +0000 UTC m=+2921.521442824" Nov 21 19:50:30 crc kubenswrapper[4701]: I1121 19:50:30.766765 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-volume-nfs-0" podStartSLOduration=2.69881136 podStartE2EDuration="2.766736164s" podCreationTimestamp="2025-11-21 19:50:28 +0000 UTC" firstStartedPulling="2025-11-21 19:50:29.740578611 +0000 UTC m=+2920.525718638" lastFinishedPulling="2025-11-21 19:50:29.808503405 +0000 UTC m=+2920.593643442" observedRunningTime="2025-11-21 19:50:30.758560182 +0000 UTC m=+2921.543700209" watchObservedRunningTime="2025-11-21 19:50:30.766736164 +0000 UTC m=+2921.551876201" Nov 21 19:50:33 crc kubenswrapper[4701]: I1121 19:50:33.724231 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-backup-0" Nov 21 19:50:33 crc kubenswrapper[4701]: I1121 19:50:33.836029 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:34 crc kubenswrapper[4701]: I1121 19:50:34.005700 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:38 crc kubenswrapper[4701]: I1121 19:50:38.937808 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-backup-0" Nov 21 19:50:39 crc kubenswrapper[4701]: I1121 19:50:39.112339 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-volume-nfs-2-0" Nov 21 19:50:39 crc kubenswrapper[4701]: I1121 19:50:39.284244 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-volume-nfs-0" Nov 21 19:50:48 crc kubenswrapper[4701]: I1121 19:50:48.613401 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 19:50:48 crc kubenswrapper[4701]: I1121 
19:50:48.614553 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 19:51:18 crc kubenswrapper[4701]: I1121 19:51:18.613976 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 19:51:18 crc kubenswrapper[4701]: I1121 19:51:18.615000 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 19:51:18 crc kubenswrapper[4701]: I1121 19:51:18.615055 4701 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" Nov 21 19:51:18 crc kubenswrapper[4701]: I1121 19:51:18.615943 4701 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ea89037636518203b6d07d6774a86e894d096cd957d648cc556a387001e667da"} pod="openshift-machine-config-operator/machine-config-daemon-tbszf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 19:51:18 crc kubenswrapper[4701]: I1121 19:51:18.616005 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" containerID="cri-o://ea89037636518203b6d07d6774a86e894d096cd957d648cc556a387001e667da" gracePeriod=600 Nov 21 19:51:18 crc kubenswrapper[4701]: E1121 19:51:18.738285 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:51:19 crc kubenswrapper[4701]: I1121 19:51:19.420131 4701 generic.go:334] "Generic (PLEG): container finished" podID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerID="ea89037636518203b6d07d6774a86e894d096cd957d648cc556a387001e667da" exitCode=0 Nov 21 19:51:19 crc kubenswrapper[4701]: I1121 19:51:19.420277 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerDied","Data":"ea89037636518203b6d07d6774a86e894d096cd957d648cc556a387001e667da"} Nov 21 19:51:19 crc kubenswrapper[4701]: I1121 19:51:19.420908 4701 scope.go:117] "RemoveContainer" containerID="1da90685f868faf9a72c4d22ec36474fb36df1e592760b3aafb20f1467e23cc0" Nov 21 19:51:19 crc kubenswrapper[4701]: I1121 19:51:19.421805 4701 scope.go:117] "RemoveContainer" 
containerID="ea89037636518203b6d07d6774a86e894d096cd957d648cc556a387001e667da" Nov 21 19:51:19 crc kubenswrapper[4701]: E1121 19:51:19.422374 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:51:30 crc kubenswrapper[4701]: I1121 19:51:30.953153 4701 scope.go:117] "RemoveContainer" containerID="ea89037636518203b6d07d6774a86e894d096cd957d648cc556a387001e667da" Nov 21 19:51:30 crc kubenswrapper[4701]: E1121 19:51:30.954416 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:51:31 crc kubenswrapper[4701]: I1121 19:51:31.273841 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 21 19:51:31 crc kubenswrapper[4701]: I1121 19:51:31.275348 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad" containerName="prometheus" containerID="cri-o://47e6feef494f10df8ce7f66b14bbbed3b00134aa95c4b1618d86721b88c1e0ac" gracePeriod=600 Nov 21 19:51:31 crc kubenswrapper[4701]: I1121 19:51:31.275493 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad" containerName="config-reloader" containerID="cri-o://15cfac3ccdc40582b6e25ebdc744517c8fb822c06279fd3f7da405da2a54e808" gracePeriod=600 Nov 21 19:51:31 crc kubenswrapper[4701]: I1121 19:51:31.275831 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad" containerName="thanos-sidecar" containerID="cri-o://0d9514b7787bb00a1f71e72ff6c405d96325360daf4f4a233f12bf84830613c9" gracePeriod=600 Nov 21 19:51:31 crc kubenswrapper[4701]: I1121 19:51:31.580797 4701 generic.go:334] "Generic (PLEG): container finished" podID="c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad" containerID="0d9514b7787bb00a1f71e72ff6c405d96325360daf4f4a233f12bf84830613c9" exitCode=0 Nov 21 19:51:31 crc kubenswrapper[4701]: I1121 19:51:31.580849 4701 generic.go:334] "Generic (PLEG): container finished" podID="c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad" containerID="47e6feef494f10df8ce7f66b14bbbed3b00134aa95c4b1618d86721b88c1e0ac" exitCode=0 Nov 21 19:51:31 crc kubenswrapper[4701]: I1121 19:51:31.580884 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad","Type":"ContainerDied","Data":"0d9514b7787bb00a1f71e72ff6c405d96325360daf4f4a233f12bf84830613c9"} Nov 21 19:51:31 crc kubenswrapper[4701]: I1121 19:51:31.580924 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" 
event={"ID":"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad","Type":"ContainerDied","Data":"47e6feef494f10df8ce7f66b14bbbed3b00134aa95c4b1618d86721b88c1e0ac"} Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.440622 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.514633 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-thanos-prometheus-http-client-file\") pod \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.514690 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-tls-assets\") pod \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.514734 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-config-out\") pod \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.514757 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-config\") pod \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.514788 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.514817 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-prometheus-metric-storage-rulefiles-0\") pod \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.514886 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-46sxk\" (UniqueName: \"kubernetes.io/projected/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-kube-api-access-46sxk\") pod \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.515019 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.515086 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-secret-combined-ca-bundle\") pod \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.516089 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad" (UID: "c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.516184 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7756673b-01d8-4e24-be57-9b42676a4870\") pod \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.516332 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-web-config\") pod \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\" (UID: \"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad\") " Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.516859 4701 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\"" Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.524851 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad" (UID: "c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.525386 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-secret-combined-ca-bundle" (OuterVolumeSpecName: "secret-combined-ca-bundle") pod "c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad" (UID: "c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad"). InnerVolumeSpecName "secret-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.528548 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-config-out" (OuterVolumeSpecName: "config-out") pod "c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad" (UID: "c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.528842 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad" (UID: "c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad"). 
InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.528958 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-config" (OuterVolumeSpecName: "config") pod "c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad" (UID: "c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.529019 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-kube-api-access-46sxk" (OuterVolumeSpecName: "kube-api-access-46sxk") pod "c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad" (UID: "c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad"). InnerVolumeSpecName "kube-api-access-46sxk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.534760 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d" (OuterVolumeSpecName: "web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d") pod "c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad" (UID: "c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad"). InnerVolumeSpecName "web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.544771 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d" (OuterVolumeSpecName: "web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d") pod "c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad" (UID: "c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad"). InnerVolumeSpecName "web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.564878 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7756673b-01d8-4e24-be57-9b42676a4870" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad" (UID: "c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad"). InnerVolumeSpecName "pvc-7756673b-01d8-4e24-be57-9b42676a4870". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.606463 4701 generic.go:334] "Generic (PLEG): container finished" podID="c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad" containerID="15cfac3ccdc40582b6e25ebdc744517c8fb822c06279fd3f7da405da2a54e808" exitCode=0 Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.606521 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad","Type":"ContainerDied","Data":"15cfac3ccdc40582b6e25ebdc744517c8fb822c06279fd3f7da405da2a54e808"} Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.606564 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad","Type":"ContainerDied","Data":"d1deff42d4b13a99d4cced8cd0a2bd65559e9c25d780480996b678f094caf1ba"} Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.606593 4701 scope.go:117] "RemoveContainer" containerID="0d9514b7787bb00a1f71e72ff6c405d96325360daf4f4a233f12bf84830613c9" Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.610281 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.619750 4701 reconciler_common.go:293] "Volume detached for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") on node \"crc\" DevicePath \"\"" Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.619803 4701 reconciler_common.go:293] "Volume detached for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-secret-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.619847 4701 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-7756673b-01d8-4e24-be57-9b42676a4870\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7756673b-01d8-4e24-be57-9b42676a4870\") on node \"crc\" " Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.619864 4701 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\"" Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.619880 4701 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-tls-assets\") on node \"crc\" DevicePath \"\"" Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.619891 4701 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-config-out\") on node \"crc\" DevicePath \"\"" Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.619901 4701 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.619911 4701 reconciler_common.go:293] "Volume detached for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: 
\"kubernetes.io/secret/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") on node \"crc\" DevicePath \"\"" Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.619924 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-46sxk\" (UniqueName: \"kubernetes.io/projected/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-kube-api-access-46sxk\") on node \"crc\" DevicePath \"\"" Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.657500 4701 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.657822 4701 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-7756673b-01d8-4e24-be57-9b42676a4870" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7756673b-01d8-4e24-be57-9b42676a4870") on node "crc" Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.665631 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-web-config" (OuterVolumeSpecName: "web-config") pod "c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad" (UID: "c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad"). InnerVolumeSpecName "web-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.721583 4701 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad-web-config\") on node \"crc\" DevicePath \"\"" Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.721722 4701 reconciler_common.go:293] "Volume detached for volume \"pvc-7756673b-01d8-4e24-be57-9b42676a4870\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7756673b-01d8-4e24-be57-9b42676a4870\") on node \"crc\" DevicePath \"\"" Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.780954 4701 scope.go:117] "RemoveContainer" containerID="15cfac3ccdc40582b6e25ebdc744517c8fb822c06279fd3f7da405da2a54e808" Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.806381 4701 scope.go:117] "RemoveContainer" containerID="47e6feef494f10df8ce7f66b14bbbed3b00134aa95c4b1618d86721b88c1e0ac" Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.828608 4701 scope.go:117] "RemoveContainer" containerID="ad988e0923fdfe2974b66f5a9bd3f767da63e391b47281826b2294e70a1007de" Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.856260 4701 scope.go:117] "RemoveContainer" containerID="0d9514b7787bb00a1f71e72ff6c405d96325360daf4f4a233f12bf84830613c9" Nov 21 19:51:32 crc kubenswrapper[4701]: E1121 19:51:32.856899 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d9514b7787bb00a1f71e72ff6c405d96325360daf4f4a233f12bf84830613c9\": container with ID starting with 0d9514b7787bb00a1f71e72ff6c405d96325360daf4f4a233f12bf84830613c9 not found: ID does not exist" containerID="0d9514b7787bb00a1f71e72ff6c405d96325360daf4f4a233f12bf84830613c9" Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.856938 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d9514b7787bb00a1f71e72ff6c405d96325360daf4f4a233f12bf84830613c9"} err="failed to get container status \"0d9514b7787bb00a1f71e72ff6c405d96325360daf4f4a233f12bf84830613c9\": rpc error: code = NotFound desc = could not find container 
\"0d9514b7787bb00a1f71e72ff6c405d96325360daf4f4a233f12bf84830613c9\": container with ID starting with 0d9514b7787bb00a1f71e72ff6c405d96325360daf4f4a233f12bf84830613c9 not found: ID does not exist" Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.856972 4701 scope.go:117] "RemoveContainer" containerID="15cfac3ccdc40582b6e25ebdc744517c8fb822c06279fd3f7da405da2a54e808" Nov 21 19:51:32 crc kubenswrapper[4701]: E1121 19:51:32.857506 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"15cfac3ccdc40582b6e25ebdc744517c8fb822c06279fd3f7da405da2a54e808\": container with ID starting with 15cfac3ccdc40582b6e25ebdc744517c8fb822c06279fd3f7da405da2a54e808 not found: ID does not exist" containerID="15cfac3ccdc40582b6e25ebdc744517c8fb822c06279fd3f7da405da2a54e808" Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.857577 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"15cfac3ccdc40582b6e25ebdc744517c8fb822c06279fd3f7da405da2a54e808"} err="failed to get container status \"15cfac3ccdc40582b6e25ebdc744517c8fb822c06279fd3f7da405da2a54e808\": rpc error: code = NotFound desc = could not find container \"15cfac3ccdc40582b6e25ebdc744517c8fb822c06279fd3f7da405da2a54e808\": container with ID starting with 15cfac3ccdc40582b6e25ebdc744517c8fb822c06279fd3f7da405da2a54e808 not found: ID does not exist" Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.857621 4701 scope.go:117] "RemoveContainer" containerID="47e6feef494f10df8ce7f66b14bbbed3b00134aa95c4b1618d86721b88c1e0ac" Nov 21 19:51:32 crc kubenswrapper[4701]: E1121 19:51:32.858262 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"47e6feef494f10df8ce7f66b14bbbed3b00134aa95c4b1618d86721b88c1e0ac\": container with ID starting with 47e6feef494f10df8ce7f66b14bbbed3b00134aa95c4b1618d86721b88c1e0ac not found: ID does not exist" containerID="47e6feef494f10df8ce7f66b14bbbed3b00134aa95c4b1618d86721b88c1e0ac" Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.858333 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"47e6feef494f10df8ce7f66b14bbbed3b00134aa95c4b1618d86721b88c1e0ac"} err="failed to get container status \"47e6feef494f10df8ce7f66b14bbbed3b00134aa95c4b1618d86721b88c1e0ac\": rpc error: code = NotFound desc = could not find container \"47e6feef494f10df8ce7f66b14bbbed3b00134aa95c4b1618d86721b88c1e0ac\": container with ID starting with 47e6feef494f10df8ce7f66b14bbbed3b00134aa95c4b1618d86721b88c1e0ac not found: ID does not exist" Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.858371 4701 scope.go:117] "RemoveContainer" containerID="ad988e0923fdfe2974b66f5a9bd3f767da63e391b47281826b2294e70a1007de" Nov 21 19:51:32 crc kubenswrapper[4701]: E1121 19:51:32.858746 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ad988e0923fdfe2974b66f5a9bd3f767da63e391b47281826b2294e70a1007de\": container with ID starting with ad988e0923fdfe2974b66f5a9bd3f767da63e391b47281826b2294e70a1007de not found: ID does not exist" containerID="ad988e0923fdfe2974b66f5a9bd3f767da63e391b47281826b2294e70a1007de" Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.858777 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ad988e0923fdfe2974b66f5a9bd3f767da63e391b47281826b2294e70a1007de"} 
err="failed to get container status \"ad988e0923fdfe2974b66f5a9bd3f767da63e391b47281826b2294e70a1007de\": rpc error: code = NotFound desc = could not find container \"ad988e0923fdfe2974b66f5a9bd3f767da63e391b47281826b2294e70a1007de\": container with ID starting with ad988e0923fdfe2974b66f5a9bd3f767da63e391b47281826b2294e70a1007de not found: ID does not exist" Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.956878 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 21 19:51:32 crc kubenswrapper[4701]: I1121 19:51:32.971267 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.003615 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 21 19:51:33 crc kubenswrapper[4701]: E1121 19:51:33.004573 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad" containerName="config-reloader" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.004617 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad" containerName="config-reloader" Nov 21 19:51:33 crc kubenswrapper[4701]: E1121 19:51:33.004652 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad" containerName="init-config-reloader" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.004665 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad" containerName="init-config-reloader" Nov 21 19:51:33 crc kubenswrapper[4701]: E1121 19:51:33.004705 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad" containerName="prometheus" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.004715 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad" containerName="prometheus" Nov 21 19:51:33 crc kubenswrapper[4701]: E1121 19:51:33.004735 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad" containerName="thanos-sidecar" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.004743 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad" containerName="thanos-sidecar" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.005003 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad" containerName="prometheus" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.005038 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad" containerName="config-reloader" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.005054 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad" containerName="thanos-sidecar" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.007613 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.011835 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.012170 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.015346 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.015493 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.015515 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-p9vfl" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.044648 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.084545 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.143003 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xlt8d\" (UniqueName: \"kubernetes.io/projected/71c6d659-75fd-4221-8b7e-1496221311fe-kube-api-access-xlt8d\") pod \"prometheus-metric-storage-0\" (UID: \"71c6d659-75fd-4221-8b7e-1496221311fe\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.143084 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71c6d659-75fd-4221-8b7e-1496221311fe-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"71c6d659-75fd-4221-8b7e-1496221311fe\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.143124 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/71c6d659-75fd-4221-8b7e-1496221311fe-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"71c6d659-75fd-4221-8b7e-1496221311fe\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.143154 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/71c6d659-75fd-4221-8b7e-1496221311fe-config\") pod \"prometheus-metric-storage-0\" (UID: \"71c6d659-75fd-4221-8b7e-1496221311fe\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.143224 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/71c6d659-75fd-4221-8b7e-1496221311fe-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"71c6d659-75fd-4221-8b7e-1496221311fe\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.143266 4701 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/71c6d659-75fd-4221-8b7e-1496221311fe-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"71c6d659-75fd-4221-8b7e-1496221311fe\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.143294 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/71c6d659-75fd-4221-8b7e-1496221311fe-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"71c6d659-75fd-4221-8b7e-1496221311fe\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.143424 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/71c6d659-75fd-4221-8b7e-1496221311fe-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"71c6d659-75fd-4221-8b7e-1496221311fe\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.143458 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/71c6d659-75fd-4221-8b7e-1496221311fe-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"71c6d659-75fd-4221-8b7e-1496221311fe\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.143479 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/71c6d659-75fd-4221-8b7e-1496221311fe-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"71c6d659-75fd-4221-8b7e-1496221311fe\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.143502 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-7756673b-01d8-4e24-be57-9b42676a4870\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7756673b-01d8-4e24-be57-9b42676a4870\") pod \"prometheus-metric-storage-0\" (UID: \"71c6d659-75fd-4221-8b7e-1496221311fe\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.245312 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/71c6d659-75fd-4221-8b7e-1496221311fe-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"71c6d659-75fd-4221-8b7e-1496221311fe\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.245375 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/71c6d659-75fd-4221-8b7e-1496221311fe-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"71c6d659-75fd-4221-8b7e-1496221311fe\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.245400 4701 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/71c6d659-75fd-4221-8b7e-1496221311fe-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"71c6d659-75fd-4221-8b7e-1496221311fe\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.245425 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-7756673b-01d8-4e24-be57-9b42676a4870\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7756673b-01d8-4e24-be57-9b42676a4870\") pod \"prometheus-metric-storage-0\" (UID: \"71c6d659-75fd-4221-8b7e-1496221311fe\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.245504 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xlt8d\" (UniqueName: \"kubernetes.io/projected/71c6d659-75fd-4221-8b7e-1496221311fe-kube-api-access-xlt8d\") pod \"prometheus-metric-storage-0\" (UID: \"71c6d659-75fd-4221-8b7e-1496221311fe\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.245544 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71c6d659-75fd-4221-8b7e-1496221311fe-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"71c6d659-75fd-4221-8b7e-1496221311fe\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.245566 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/71c6d659-75fd-4221-8b7e-1496221311fe-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"71c6d659-75fd-4221-8b7e-1496221311fe\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.245582 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/71c6d659-75fd-4221-8b7e-1496221311fe-config\") pod \"prometheus-metric-storage-0\" (UID: \"71c6d659-75fd-4221-8b7e-1496221311fe\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.245615 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/71c6d659-75fd-4221-8b7e-1496221311fe-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"71c6d659-75fd-4221-8b7e-1496221311fe\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.245645 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/71c6d659-75fd-4221-8b7e-1496221311fe-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"71c6d659-75fd-4221-8b7e-1496221311fe\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.245668 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/71c6d659-75fd-4221-8b7e-1496221311fe-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: 
\"71c6d659-75fd-4221-8b7e-1496221311fe\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.247330 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/71c6d659-75fd-4221-8b7e-1496221311fe-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"71c6d659-75fd-4221-8b7e-1496221311fe\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.249575 4701 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.249712 4701 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-7756673b-01d8-4e24-be57-9b42676a4870\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7756673b-01d8-4e24-be57-9b42676a4870\") pod \"prometheus-metric-storage-0\" (UID: \"71c6d659-75fd-4221-8b7e-1496221311fe\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/cac188d07dbda74642d10d9af8d31d97e15d9a3dab466103b81232fdd62bf350/globalmount\"" pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.251452 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/71c6d659-75fd-4221-8b7e-1496221311fe-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"71c6d659-75fd-4221-8b7e-1496221311fe\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.251657 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/71c6d659-75fd-4221-8b7e-1496221311fe-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"71c6d659-75fd-4221-8b7e-1496221311fe\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.251862 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/71c6d659-75fd-4221-8b7e-1496221311fe-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"71c6d659-75fd-4221-8b7e-1496221311fe\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.252765 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71c6d659-75fd-4221-8b7e-1496221311fe-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"71c6d659-75fd-4221-8b7e-1496221311fe\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.253655 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/71c6d659-75fd-4221-8b7e-1496221311fe-config\") pod \"prometheus-metric-storage-0\" (UID: \"71c6d659-75fd-4221-8b7e-1496221311fe\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.253880 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: 
\"kubernetes.io/secret/71c6d659-75fd-4221-8b7e-1496221311fe-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"71c6d659-75fd-4221-8b7e-1496221311fe\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.254188 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/71c6d659-75fd-4221-8b7e-1496221311fe-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"71c6d659-75fd-4221-8b7e-1496221311fe\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.254720 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/71c6d659-75fd-4221-8b7e-1496221311fe-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"71c6d659-75fd-4221-8b7e-1496221311fe\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.269262 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xlt8d\" (UniqueName: \"kubernetes.io/projected/71c6d659-75fd-4221-8b7e-1496221311fe-kube-api-access-xlt8d\") pod \"prometheus-metric-storage-0\" (UID: \"71c6d659-75fd-4221-8b7e-1496221311fe\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.310978 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-7756673b-01d8-4e24-be57-9b42676a4870\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7756673b-01d8-4e24-be57-9b42676a4870\") pod \"prometheus-metric-storage-0\" (UID: \"71c6d659-75fd-4221-8b7e-1496221311fe\") " pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.395308 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:33 crc kubenswrapper[4701]: I1121 19:51:33.965021 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad" path="/var/lib/kubelet/pods/c7d6de4b-996c-4ea4-a099-b0c98d7cc3ad/volumes" Nov 21 19:51:34 crc kubenswrapper[4701]: I1121 19:51:34.130299 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 21 19:51:34 crc kubenswrapper[4701]: I1121 19:51:34.631089 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"71c6d659-75fd-4221-8b7e-1496221311fe","Type":"ContainerStarted","Data":"dc593cca61736485d241d4f762c69194cea543ef184155804b9b76309b36cab0"} Nov 21 19:51:39 crc kubenswrapper[4701]: I1121 19:51:39.250954 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-v7mtc"] Nov 21 19:51:39 crc kubenswrapper[4701]: I1121 19:51:39.254351 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-v7mtc" Nov 21 19:51:39 crc kubenswrapper[4701]: I1121 19:51:39.266736 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-v7mtc"] Nov 21 19:51:39 crc kubenswrapper[4701]: I1121 19:51:39.296336 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/768949bb-0548-41c0-9d71-6e34bf09facd-catalog-content\") pod \"community-operators-v7mtc\" (UID: \"768949bb-0548-41c0-9d71-6e34bf09facd\") " pod="openshift-marketplace/community-operators-v7mtc" Nov 21 19:51:39 crc kubenswrapper[4701]: I1121 19:51:39.296429 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/768949bb-0548-41c0-9d71-6e34bf09facd-utilities\") pod \"community-operators-v7mtc\" (UID: \"768949bb-0548-41c0-9d71-6e34bf09facd\") " pod="openshift-marketplace/community-operators-v7mtc" Nov 21 19:51:39 crc kubenswrapper[4701]: I1121 19:51:39.296476 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6xg78\" (UniqueName: \"kubernetes.io/projected/768949bb-0548-41c0-9d71-6e34bf09facd-kube-api-access-6xg78\") pod \"community-operators-v7mtc\" (UID: \"768949bb-0548-41c0-9d71-6e34bf09facd\") " pod="openshift-marketplace/community-operators-v7mtc" Nov 21 19:51:39 crc kubenswrapper[4701]: I1121 19:51:39.398792 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6xg78\" (UniqueName: \"kubernetes.io/projected/768949bb-0548-41c0-9d71-6e34bf09facd-kube-api-access-6xg78\") pod \"community-operators-v7mtc\" (UID: \"768949bb-0548-41c0-9d71-6e34bf09facd\") " pod="openshift-marketplace/community-operators-v7mtc" Nov 21 19:51:39 crc kubenswrapper[4701]: I1121 19:51:39.399027 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/768949bb-0548-41c0-9d71-6e34bf09facd-catalog-content\") pod \"community-operators-v7mtc\" (UID: \"768949bb-0548-41c0-9d71-6e34bf09facd\") " pod="openshift-marketplace/community-operators-v7mtc" Nov 21 19:51:39 crc kubenswrapper[4701]: I1121 19:51:39.399077 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/768949bb-0548-41c0-9d71-6e34bf09facd-utilities\") pod \"community-operators-v7mtc\" (UID: \"768949bb-0548-41c0-9d71-6e34bf09facd\") " pod="openshift-marketplace/community-operators-v7mtc" Nov 21 19:51:39 crc kubenswrapper[4701]: I1121 19:51:39.399694 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/768949bb-0548-41c0-9d71-6e34bf09facd-utilities\") pod \"community-operators-v7mtc\" (UID: \"768949bb-0548-41c0-9d71-6e34bf09facd\") " pod="openshift-marketplace/community-operators-v7mtc" Nov 21 19:51:39 crc kubenswrapper[4701]: I1121 19:51:39.399765 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/768949bb-0548-41c0-9d71-6e34bf09facd-catalog-content\") pod \"community-operators-v7mtc\" (UID: \"768949bb-0548-41c0-9d71-6e34bf09facd\") " pod="openshift-marketplace/community-operators-v7mtc" Nov 21 19:51:39 crc kubenswrapper[4701]: I1121 19:51:39.440334 4701 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-6xg78\" (UniqueName: \"kubernetes.io/projected/768949bb-0548-41c0-9d71-6e34bf09facd-kube-api-access-6xg78\") pod \"community-operators-v7mtc\" (UID: \"768949bb-0548-41c0-9d71-6e34bf09facd\") " pod="openshift-marketplace/community-operators-v7mtc" Nov 21 19:51:39 crc kubenswrapper[4701]: I1121 19:51:39.590756 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-v7mtc" Nov 21 19:51:40 crc kubenswrapper[4701]: I1121 19:51:40.260729 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-v7mtc"] Nov 21 19:51:40 crc kubenswrapper[4701]: W1121 19:51:40.268327 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod768949bb_0548_41c0_9d71_6e34bf09facd.slice/crio-979b2f7f7bf65c6e40c197d45b23318d6f185c6c73787396e33a91ce371923a4 WatchSource:0}: Error finding container 979b2f7f7bf65c6e40c197d45b23318d6f185c6c73787396e33a91ce371923a4: Status 404 returned error can't find the container with id 979b2f7f7bf65c6e40c197d45b23318d6f185c6c73787396e33a91ce371923a4 Nov 21 19:51:40 crc kubenswrapper[4701]: I1121 19:51:40.736177 4701 generic.go:334] "Generic (PLEG): container finished" podID="768949bb-0548-41c0-9d71-6e34bf09facd" containerID="60a85e6f481e96795a4471882b58bc64598a42ac7d2f1d3b07e79e9445575835" exitCode=0 Nov 21 19:51:40 crc kubenswrapper[4701]: I1121 19:51:40.736292 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v7mtc" event={"ID":"768949bb-0548-41c0-9d71-6e34bf09facd","Type":"ContainerDied","Data":"60a85e6f481e96795a4471882b58bc64598a42ac7d2f1d3b07e79e9445575835"} Nov 21 19:51:40 crc kubenswrapper[4701]: I1121 19:51:40.736403 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v7mtc" event={"ID":"768949bb-0548-41c0-9d71-6e34bf09facd","Type":"ContainerStarted","Data":"979b2f7f7bf65c6e40c197d45b23318d6f185c6c73787396e33a91ce371923a4"} Nov 21 19:51:41 crc kubenswrapper[4701]: I1121 19:51:41.753137 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"71c6d659-75fd-4221-8b7e-1496221311fe","Type":"ContainerStarted","Data":"2c849164ac5a1206eee2c877818f28b0fd8ed0e1958dd552194f5fafb35d0744"} Nov 21 19:51:41 crc kubenswrapper[4701]: I1121 19:51:41.758429 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v7mtc" event={"ID":"768949bb-0548-41c0-9d71-6e34bf09facd","Type":"ContainerStarted","Data":"8760875cc1e41a3b10be13b6dab31520ae656f10bd3913a00d7610e6f70abd52"} Nov 21 19:51:42 crc kubenswrapper[4701]: I1121 19:51:42.951074 4701 scope.go:117] "RemoveContainer" containerID="ea89037636518203b6d07d6774a86e894d096cd957d648cc556a387001e667da" Nov 21 19:51:42 crc kubenswrapper[4701]: E1121 19:51:42.951496 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:51:43 crc kubenswrapper[4701]: I1121 19:51:43.812527 4701 generic.go:334] "Generic (PLEG): container finished" 
podID="768949bb-0548-41c0-9d71-6e34bf09facd" containerID="8760875cc1e41a3b10be13b6dab31520ae656f10bd3913a00d7610e6f70abd52" exitCode=0 Nov 21 19:51:43 crc kubenswrapper[4701]: I1121 19:51:43.812597 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v7mtc" event={"ID":"768949bb-0548-41c0-9d71-6e34bf09facd","Type":"ContainerDied","Data":"8760875cc1e41a3b10be13b6dab31520ae656f10bd3913a00d7610e6f70abd52"} Nov 21 19:51:44 crc kubenswrapper[4701]: I1121 19:51:44.831687 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v7mtc" event={"ID":"768949bb-0548-41c0-9d71-6e34bf09facd","Type":"ContainerStarted","Data":"9686f5f34bacde3472f4655339ad0f3e17d27afa8332a25057041006aba9b9f8"} Nov 21 19:51:44 crc kubenswrapper[4701]: I1121 19:51:44.868034 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-v7mtc" podStartSLOduration=2.359235846 podStartE2EDuration="5.868009835s" podCreationTimestamp="2025-11-21 19:51:39 +0000 UTC" firstStartedPulling="2025-11-21 19:51:40.7398441 +0000 UTC m=+2991.524984157" lastFinishedPulling="2025-11-21 19:51:44.248618119 +0000 UTC m=+2995.033758146" observedRunningTime="2025-11-21 19:51:44.856733909 +0000 UTC m=+2995.641873936" watchObservedRunningTime="2025-11-21 19:51:44.868009835 +0000 UTC m=+2995.653149862" Nov 21 19:51:49 crc kubenswrapper[4701]: I1121 19:51:49.591610 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-v7mtc" Nov 21 19:51:49 crc kubenswrapper[4701]: I1121 19:51:49.592327 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-v7mtc" Nov 21 19:51:50 crc kubenswrapper[4701]: I1121 19:51:50.675116 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-v7mtc" podUID="768949bb-0548-41c0-9d71-6e34bf09facd" containerName="registry-server" probeResult="failure" output=< Nov 21 19:51:50 crc kubenswrapper[4701]: timeout: failed to connect service ":50051" within 1s Nov 21 19:51:50 crc kubenswrapper[4701]: > Nov 21 19:51:50 crc kubenswrapper[4701]: I1121 19:51:50.920015 4701 generic.go:334] "Generic (PLEG): container finished" podID="71c6d659-75fd-4221-8b7e-1496221311fe" containerID="2c849164ac5a1206eee2c877818f28b0fd8ed0e1958dd552194f5fafb35d0744" exitCode=0 Nov 21 19:51:50 crc kubenswrapper[4701]: I1121 19:51:50.920077 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"71c6d659-75fd-4221-8b7e-1496221311fe","Type":"ContainerDied","Data":"2c849164ac5a1206eee2c877818f28b0fd8ed0e1958dd552194f5fafb35d0744"} Nov 21 19:51:51 crc kubenswrapper[4701]: I1121 19:51:51.937976 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"71c6d659-75fd-4221-8b7e-1496221311fe","Type":"ContainerStarted","Data":"02caaf2d1c02cfb4b16f5700f92dd21fb2d7682947e900b6d7555cf90b95a397"} Nov 21 19:51:54 crc kubenswrapper[4701]: I1121 19:51:54.951546 4701 scope.go:117] "RemoveContainer" containerID="ea89037636518203b6d07d6774a86e894d096cd957d648cc556a387001e667da" Nov 21 19:51:54 crc kubenswrapper[4701]: E1121 19:51:54.952788 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:51:57 crc kubenswrapper[4701]: I1121 19:51:57.019456 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"71c6d659-75fd-4221-8b7e-1496221311fe","Type":"ContainerStarted","Data":"d9c535a0e523dcde5bf9050f382506818d3aff08d7d531a1d584699cfe579f4a"} Nov 21 19:51:57 crc kubenswrapper[4701]: I1121 19:51:57.020337 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"71c6d659-75fd-4221-8b7e-1496221311fe","Type":"ContainerStarted","Data":"6f61983742c65001ab383307b1db2193f9b9acaa8f5ece2015a70235abd4f4af"} Nov 21 19:51:57 crc kubenswrapper[4701]: I1121 19:51:57.069468 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=25.069441155 podStartE2EDuration="25.069441155s" podCreationTimestamp="2025-11-21 19:51:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 19:51:57.063776602 +0000 UTC m=+3007.848916669" watchObservedRunningTime="2025-11-21 19:51:57.069441155 +0000 UTC m=+3007.854581202" Nov 21 19:51:58 crc kubenswrapper[4701]: I1121 19:51:58.396945 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Nov 21 19:51:59 crc kubenswrapper[4701]: I1121 19:51:59.691083 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-v7mtc" Nov 21 19:51:59 crc kubenswrapper[4701]: I1121 19:51:59.779678 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-v7mtc" Nov 21 19:51:59 crc kubenswrapper[4701]: I1121 19:51:59.974334 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-v7mtc"] Nov 21 19:52:01 crc kubenswrapper[4701]: I1121 19:52:01.103588 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-v7mtc" podUID="768949bb-0548-41c0-9d71-6e34bf09facd" containerName="registry-server" containerID="cri-o://9686f5f34bacde3472f4655339ad0f3e17d27afa8332a25057041006aba9b9f8" gracePeriod=2 Nov 21 19:52:01 crc kubenswrapper[4701]: I1121 19:52:01.748168 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-v7mtc" Nov 21 19:52:01 crc kubenswrapper[4701]: I1121 19:52:01.919066 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/768949bb-0548-41c0-9d71-6e34bf09facd-catalog-content\") pod \"768949bb-0548-41c0-9d71-6e34bf09facd\" (UID: \"768949bb-0548-41c0-9d71-6e34bf09facd\") " Nov 21 19:52:01 crc kubenswrapper[4701]: I1121 19:52:01.919546 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/768949bb-0548-41c0-9d71-6e34bf09facd-utilities\") pod \"768949bb-0548-41c0-9d71-6e34bf09facd\" (UID: \"768949bb-0548-41c0-9d71-6e34bf09facd\") " Nov 21 19:52:01 crc kubenswrapper[4701]: I1121 19:52:01.919693 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6xg78\" (UniqueName: \"kubernetes.io/projected/768949bb-0548-41c0-9d71-6e34bf09facd-kube-api-access-6xg78\") pod \"768949bb-0548-41c0-9d71-6e34bf09facd\" (UID: \"768949bb-0548-41c0-9d71-6e34bf09facd\") " Nov 21 19:52:01 crc kubenswrapper[4701]: I1121 19:52:01.920626 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/768949bb-0548-41c0-9d71-6e34bf09facd-utilities" (OuterVolumeSpecName: "utilities") pod "768949bb-0548-41c0-9d71-6e34bf09facd" (UID: "768949bb-0548-41c0-9d71-6e34bf09facd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:52:01 crc kubenswrapper[4701]: I1121 19:52:01.936432 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/768949bb-0548-41c0-9d71-6e34bf09facd-kube-api-access-6xg78" (OuterVolumeSpecName: "kube-api-access-6xg78") pod "768949bb-0548-41c0-9d71-6e34bf09facd" (UID: "768949bb-0548-41c0-9d71-6e34bf09facd"). InnerVolumeSpecName "kube-api-access-6xg78". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:52:01 crc kubenswrapper[4701]: I1121 19:52:01.969888 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/768949bb-0548-41c0-9d71-6e34bf09facd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "768949bb-0548-41c0-9d71-6e34bf09facd" (UID: "768949bb-0548-41c0-9d71-6e34bf09facd"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:52:02 crc kubenswrapper[4701]: I1121 19:52:02.023756 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6xg78\" (UniqueName: \"kubernetes.io/projected/768949bb-0548-41c0-9d71-6e34bf09facd-kube-api-access-6xg78\") on node \"crc\" DevicePath \"\"" Nov 21 19:52:02 crc kubenswrapper[4701]: I1121 19:52:02.023836 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/768949bb-0548-41c0-9d71-6e34bf09facd-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 19:52:02 crc kubenswrapper[4701]: I1121 19:52:02.023862 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/768949bb-0548-41c0-9d71-6e34bf09facd-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 19:52:02 crc kubenswrapper[4701]: I1121 19:52:02.118310 4701 generic.go:334] "Generic (PLEG): container finished" podID="768949bb-0548-41c0-9d71-6e34bf09facd" containerID="9686f5f34bacde3472f4655339ad0f3e17d27afa8332a25057041006aba9b9f8" exitCode=0 Nov 21 19:52:02 crc kubenswrapper[4701]: I1121 19:52:02.118403 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-v7mtc" Nov 21 19:52:02 crc kubenswrapper[4701]: I1121 19:52:02.119356 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v7mtc" event={"ID":"768949bb-0548-41c0-9d71-6e34bf09facd","Type":"ContainerDied","Data":"9686f5f34bacde3472f4655339ad0f3e17d27afa8332a25057041006aba9b9f8"} Nov 21 19:52:02 crc kubenswrapper[4701]: I1121 19:52:02.119444 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v7mtc" event={"ID":"768949bb-0548-41c0-9d71-6e34bf09facd","Type":"ContainerDied","Data":"979b2f7f7bf65c6e40c197d45b23318d6f185c6c73787396e33a91ce371923a4"} Nov 21 19:52:02 crc kubenswrapper[4701]: I1121 19:52:02.119474 4701 scope.go:117] "RemoveContainer" containerID="9686f5f34bacde3472f4655339ad0f3e17d27afa8332a25057041006aba9b9f8" Nov 21 19:52:02 crc kubenswrapper[4701]: I1121 19:52:02.160938 4701 scope.go:117] "RemoveContainer" containerID="8760875cc1e41a3b10be13b6dab31520ae656f10bd3913a00d7610e6f70abd52" Nov 21 19:52:02 crc kubenswrapper[4701]: I1121 19:52:02.163991 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-v7mtc"] Nov 21 19:52:02 crc kubenswrapper[4701]: I1121 19:52:02.176408 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-v7mtc"] Nov 21 19:52:02 crc kubenswrapper[4701]: I1121 19:52:02.202582 4701 scope.go:117] "RemoveContainer" containerID="60a85e6f481e96795a4471882b58bc64598a42ac7d2f1d3b07e79e9445575835" Nov 21 19:52:02 crc kubenswrapper[4701]: I1121 19:52:02.242089 4701 scope.go:117] "RemoveContainer" containerID="9686f5f34bacde3472f4655339ad0f3e17d27afa8332a25057041006aba9b9f8" Nov 21 19:52:02 crc kubenswrapper[4701]: E1121 19:52:02.242920 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9686f5f34bacde3472f4655339ad0f3e17d27afa8332a25057041006aba9b9f8\": container with ID starting with 9686f5f34bacde3472f4655339ad0f3e17d27afa8332a25057041006aba9b9f8 not found: ID does not exist" containerID="9686f5f34bacde3472f4655339ad0f3e17d27afa8332a25057041006aba9b9f8" Nov 21 19:52:02 crc kubenswrapper[4701]: I1121 19:52:02.242960 
4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9686f5f34bacde3472f4655339ad0f3e17d27afa8332a25057041006aba9b9f8"} err="failed to get container status \"9686f5f34bacde3472f4655339ad0f3e17d27afa8332a25057041006aba9b9f8\": rpc error: code = NotFound desc = could not find container \"9686f5f34bacde3472f4655339ad0f3e17d27afa8332a25057041006aba9b9f8\": container with ID starting with 9686f5f34bacde3472f4655339ad0f3e17d27afa8332a25057041006aba9b9f8 not found: ID does not exist" Nov 21 19:52:02 crc kubenswrapper[4701]: I1121 19:52:02.242984 4701 scope.go:117] "RemoveContainer" containerID="8760875cc1e41a3b10be13b6dab31520ae656f10bd3913a00d7610e6f70abd52" Nov 21 19:52:02 crc kubenswrapper[4701]: E1121 19:52:02.243270 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8760875cc1e41a3b10be13b6dab31520ae656f10bd3913a00d7610e6f70abd52\": container with ID starting with 8760875cc1e41a3b10be13b6dab31520ae656f10bd3913a00d7610e6f70abd52 not found: ID does not exist" containerID="8760875cc1e41a3b10be13b6dab31520ae656f10bd3913a00d7610e6f70abd52" Nov 21 19:52:02 crc kubenswrapper[4701]: I1121 19:52:02.243295 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8760875cc1e41a3b10be13b6dab31520ae656f10bd3913a00d7610e6f70abd52"} err="failed to get container status \"8760875cc1e41a3b10be13b6dab31520ae656f10bd3913a00d7610e6f70abd52\": rpc error: code = NotFound desc = could not find container \"8760875cc1e41a3b10be13b6dab31520ae656f10bd3913a00d7610e6f70abd52\": container with ID starting with 8760875cc1e41a3b10be13b6dab31520ae656f10bd3913a00d7610e6f70abd52 not found: ID does not exist" Nov 21 19:52:02 crc kubenswrapper[4701]: I1121 19:52:02.243310 4701 scope.go:117] "RemoveContainer" containerID="60a85e6f481e96795a4471882b58bc64598a42ac7d2f1d3b07e79e9445575835" Nov 21 19:52:02 crc kubenswrapper[4701]: E1121 19:52:02.243872 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"60a85e6f481e96795a4471882b58bc64598a42ac7d2f1d3b07e79e9445575835\": container with ID starting with 60a85e6f481e96795a4471882b58bc64598a42ac7d2f1d3b07e79e9445575835 not found: ID does not exist" containerID="60a85e6f481e96795a4471882b58bc64598a42ac7d2f1d3b07e79e9445575835" Nov 21 19:52:02 crc kubenswrapper[4701]: I1121 19:52:02.243896 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60a85e6f481e96795a4471882b58bc64598a42ac7d2f1d3b07e79e9445575835"} err="failed to get container status \"60a85e6f481e96795a4471882b58bc64598a42ac7d2f1d3b07e79e9445575835\": rpc error: code = NotFound desc = could not find container \"60a85e6f481e96795a4471882b58bc64598a42ac7d2f1d3b07e79e9445575835\": container with ID starting with 60a85e6f481e96795a4471882b58bc64598a42ac7d2f1d3b07e79e9445575835 not found: ID does not exist" Nov 21 19:52:02 crc kubenswrapper[4701]: E1121 19:52:02.386219 4701 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod768949bb_0548_41c0_9d71_6e34bf09facd.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod768949bb_0548_41c0_9d71_6e34bf09facd.slice/crio-979b2f7f7bf65c6e40c197d45b23318d6f185c6c73787396e33a91ce371923a4\": RecentStats: unable to find 
data in memory cache]" Nov 21 19:52:03 crc kubenswrapper[4701]: I1121 19:52:03.397691 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Nov 21 19:52:03 crc kubenswrapper[4701]: I1121 19:52:03.404738 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Nov 21 19:52:03 crc kubenswrapper[4701]: I1121 19:52:03.974489 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="768949bb-0548-41c0-9d71-6e34bf09facd" path="/var/lib/kubelet/pods/768949bb-0548-41c0-9d71-6e34bf09facd/volumes" Nov 21 19:52:04 crc kubenswrapper[4701]: I1121 19:52:04.161271 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Nov 21 19:52:06 crc kubenswrapper[4701]: I1121 19:52:06.950922 4701 scope.go:117] "RemoveContainer" containerID="ea89037636518203b6d07d6774a86e894d096cd957d648cc556a387001e667da" Nov 21 19:52:06 crc kubenswrapper[4701]: E1121 19:52:06.951557 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:52:18 crc kubenswrapper[4701]: I1121 19:52:18.952144 4701 scope.go:117] "RemoveContainer" containerID="ea89037636518203b6d07d6774a86e894d096cd957d648cc556a387001e667da" Nov 21 19:52:18 crc kubenswrapper[4701]: E1121 19:52:18.954060 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:52:23 crc kubenswrapper[4701]: E1121 19:52:23.062188 4701 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/system.slice/rpm-ostreed.service\": RecentStats: unable to find data in memory cache]" Nov 21 19:52:23 crc kubenswrapper[4701]: I1121 19:52:23.767809 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest"] Nov 21 19:52:23 crc kubenswrapper[4701]: E1121 19:52:23.769128 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="768949bb-0548-41c0-9d71-6e34bf09facd" containerName="extract-utilities" Nov 21 19:52:23 crc kubenswrapper[4701]: I1121 19:52:23.769163 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="768949bb-0548-41c0-9d71-6e34bf09facd" containerName="extract-utilities" Nov 21 19:52:23 crc kubenswrapper[4701]: E1121 19:52:23.769197 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="768949bb-0548-41c0-9d71-6e34bf09facd" containerName="extract-content" Nov 21 19:52:23 crc kubenswrapper[4701]: I1121 19:52:23.769229 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="768949bb-0548-41c0-9d71-6e34bf09facd" containerName="extract-content" Nov 21 19:52:23 crc kubenswrapper[4701]: E1121 19:52:23.769298 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="768949bb-0548-41c0-9d71-6e34bf09facd" 
containerName="registry-server" Nov 21 19:52:23 crc kubenswrapper[4701]: I1121 19:52:23.769311 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="768949bb-0548-41c0-9d71-6e34bf09facd" containerName="registry-server" Nov 21 19:52:23 crc kubenswrapper[4701]: I1121 19:52:23.769623 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="768949bb-0548-41c0-9d71-6e34bf09facd" containerName="registry-server" Nov 21 19:52:23 crc kubenswrapper[4701]: I1121 19:52:23.770891 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 21 19:52:23 crc kubenswrapper[4701]: I1121 19:52:23.776568 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0" Nov 21 19:52:23 crc kubenswrapper[4701]: I1121 19:52:23.776891 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Nov 21 19:52:23 crc kubenswrapper[4701]: I1121 19:52:23.776716 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-qjzf9" Nov 21 19:52:23 crc kubenswrapper[4701]: I1121 19:52:23.777225 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key" Nov 21 19:52:23 crc kubenswrapper[4701]: I1121 19:52:23.785627 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Nov 21 19:52:23 crc kubenswrapper[4701]: I1121 19:52:23.851954 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/6dd5f296-841e-4527-88fe-3963fef0e450-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"6dd5f296-841e-4527-88fe-3963fef0e450\") " pod="openstack/tempest-tests-tempest" Nov 21 19:52:23 crc kubenswrapper[4701]: I1121 19:52:23.852497 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/6dd5f296-841e-4527-88fe-3963fef0e450-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"6dd5f296-841e-4527-88fe-3963fef0e450\") " pod="openstack/tempest-tests-tempest" Nov 21 19:52:23 crc kubenswrapper[4701]: I1121 19:52:23.852929 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6dd5f296-841e-4527-88fe-3963fef0e450-config-data\") pod \"tempest-tests-tempest\" (UID: \"6dd5f296-841e-4527-88fe-3963fef0e450\") " pod="openstack/tempest-tests-tempest" Nov 21 19:52:23 crc kubenswrapper[4701]: I1121 19:52:23.955585 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/6dd5f296-841e-4527-88fe-3963fef0e450-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"6dd5f296-841e-4527-88fe-3963fef0e450\") " pod="openstack/tempest-tests-tempest" Nov 21 19:52:23 crc kubenswrapper[4701]: I1121 19:52:23.955764 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/6dd5f296-841e-4527-88fe-3963fef0e450-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"6dd5f296-841e-4527-88fe-3963fef0e450\") " pod="openstack/tempest-tests-tempest" Nov 21 19:52:23 crc kubenswrapper[4701]: I1121 19:52:23.955793 4701 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6dd5f296-841e-4527-88fe-3963fef0e450-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"6dd5f296-841e-4527-88fe-3963fef0e450\") " pod="openstack/tempest-tests-tempest" Nov 21 19:52:23 crc kubenswrapper[4701]: I1121 19:52:23.955828 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"tempest-tests-tempest\" (UID: \"6dd5f296-841e-4527-88fe-3963fef0e450\") " pod="openstack/tempest-tests-tempest" Nov 21 19:52:23 crc kubenswrapper[4701]: I1121 19:52:23.955878 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/6dd5f296-841e-4527-88fe-3963fef0e450-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"6dd5f296-841e-4527-88fe-3963fef0e450\") " pod="openstack/tempest-tests-tempest" Nov 21 19:52:23 crc kubenswrapper[4701]: I1121 19:52:23.955963 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/6dd5f296-841e-4527-88fe-3963fef0e450-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"6dd5f296-841e-4527-88fe-3963fef0e450\") " pod="openstack/tempest-tests-tempest" Nov 21 19:52:23 crc kubenswrapper[4701]: I1121 19:52:23.956160 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dd49g\" (UniqueName: \"kubernetes.io/projected/6dd5f296-841e-4527-88fe-3963fef0e450-kube-api-access-dd49g\") pod \"tempest-tests-tempest\" (UID: \"6dd5f296-841e-4527-88fe-3963fef0e450\") " pod="openstack/tempest-tests-tempest" Nov 21 19:52:23 crc kubenswrapper[4701]: I1121 19:52:23.956233 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/6dd5f296-841e-4527-88fe-3963fef0e450-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"6dd5f296-841e-4527-88fe-3963fef0e450\") " pod="openstack/tempest-tests-tempest" Nov 21 19:52:23 crc kubenswrapper[4701]: I1121 19:52:23.956482 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6dd5f296-841e-4527-88fe-3963fef0e450-config-data\") pod \"tempest-tests-tempest\" (UID: \"6dd5f296-841e-4527-88fe-3963fef0e450\") " pod="openstack/tempest-tests-tempest" Nov 21 19:52:23 crc kubenswrapper[4701]: I1121 19:52:23.957626 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/6dd5f296-841e-4527-88fe-3963fef0e450-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"6dd5f296-841e-4527-88fe-3963fef0e450\") " pod="openstack/tempest-tests-tempest" Nov 21 19:52:23 crc kubenswrapper[4701]: I1121 19:52:23.958385 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6dd5f296-841e-4527-88fe-3963fef0e450-config-data\") pod \"tempest-tests-tempest\" (UID: \"6dd5f296-841e-4527-88fe-3963fef0e450\") " pod="openstack/tempest-tests-tempest" Nov 21 19:52:23 crc kubenswrapper[4701]: I1121 19:52:23.963814 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" 
(UniqueName: \"kubernetes.io/secret/6dd5f296-841e-4527-88fe-3963fef0e450-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"6dd5f296-841e-4527-88fe-3963fef0e450\") " pod="openstack/tempest-tests-tempest" Nov 21 19:52:24 crc kubenswrapper[4701]: I1121 19:52:24.059103 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/6dd5f296-841e-4527-88fe-3963fef0e450-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"6dd5f296-841e-4527-88fe-3963fef0e450\") " pod="openstack/tempest-tests-tempest" Nov 21 19:52:24 crc kubenswrapper[4701]: I1121 19:52:24.059167 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/6dd5f296-841e-4527-88fe-3963fef0e450-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"6dd5f296-841e-4527-88fe-3963fef0e450\") " pod="openstack/tempest-tests-tempest" Nov 21 19:52:24 crc kubenswrapper[4701]: I1121 19:52:24.059197 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6dd5f296-841e-4527-88fe-3963fef0e450-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"6dd5f296-841e-4527-88fe-3963fef0e450\") " pod="openstack/tempest-tests-tempest" Nov 21 19:52:24 crc kubenswrapper[4701]: I1121 19:52:24.059245 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"tempest-tests-tempest\" (UID: \"6dd5f296-841e-4527-88fe-3963fef0e450\") " pod="openstack/tempest-tests-tempest" Nov 21 19:52:24 crc kubenswrapper[4701]: I1121 19:52:24.059863 4701 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"tempest-tests-tempest\" (UID: \"6dd5f296-841e-4527-88fe-3963fef0e450\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/tempest-tests-tempest" Nov 21 19:52:24 crc kubenswrapper[4701]: I1121 19:52:24.059891 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/6dd5f296-841e-4527-88fe-3963fef0e450-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"6dd5f296-841e-4527-88fe-3963fef0e450\") " pod="openstack/tempest-tests-tempest" Nov 21 19:52:24 crc kubenswrapper[4701]: I1121 19:52:24.059962 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/6dd5f296-841e-4527-88fe-3963fef0e450-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"6dd5f296-841e-4527-88fe-3963fef0e450\") " pod="openstack/tempest-tests-tempest" Nov 21 19:52:24 crc kubenswrapper[4701]: I1121 19:52:24.061934 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dd49g\" (UniqueName: \"kubernetes.io/projected/6dd5f296-841e-4527-88fe-3963fef0e450-kube-api-access-dd49g\") pod \"tempest-tests-tempest\" (UID: \"6dd5f296-841e-4527-88fe-3963fef0e450\") " pod="openstack/tempest-tests-tempest" Nov 21 19:52:24 crc kubenswrapper[4701]: I1121 19:52:24.061965 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: 
\"kubernetes.io/empty-dir/6dd5f296-841e-4527-88fe-3963fef0e450-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"6dd5f296-841e-4527-88fe-3963fef0e450\") " pod="openstack/tempest-tests-tempest" Nov 21 19:52:24 crc kubenswrapper[4701]: I1121 19:52:24.065256 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6dd5f296-841e-4527-88fe-3963fef0e450-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"6dd5f296-841e-4527-88fe-3963fef0e450\") " pod="openstack/tempest-tests-tempest" Nov 21 19:52:24 crc kubenswrapper[4701]: I1121 19:52:24.067086 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/6dd5f296-841e-4527-88fe-3963fef0e450-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"6dd5f296-841e-4527-88fe-3963fef0e450\") " pod="openstack/tempest-tests-tempest" Nov 21 19:52:24 crc kubenswrapper[4701]: I1121 19:52:24.084652 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dd49g\" (UniqueName: \"kubernetes.io/projected/6dd5f296-841e-4527-88fe-3963fef0e450-kube-api-access-dd49g\") pod \"tempest-tests-tempest\" (UID: \"6dd5f296-841e-4527-88fe-3963fef0e450\") " pod="openstack/tempest-tests-tempest" Nov 21 19:52:24 crc kubenswrapper[4701]: I1121 19:52:24.121494 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"tempest-tests-tempest\" (UID: \"6dd5f296-841e-4527-88fe-3963fef0e450\") " pod="openstack/tempest-tests-tempest" Nov 21 19:52:24 crc kubenswrapper[4701]: I1121 19:52:24.406923 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 21 19:52:24 crc kubenswrapper[4701]: I1121 19:52:24.953632 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Nov 21 19:52:25 crc kubenswrapper[4701]: I1121 19:52:25.455738 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"6dd5f296-841e-4527-88fe-3963fef0e450","Type":"ContainerStarted","Data":"b592f74d11522362fa7dfb24fd1bd1f37eff18103eb5ab2d3a13bb9827c55783"} Nov 21 19:52:31 crc kubenswrapper[4701]: I1121 19:52:31.953737 4701 scope.go:117] "RemoveContainer" containerID="ea89037636518203b6d07d6774a86e894d096cd957d648cc556a387001e667da" Nov 21 19:52:31 crc kubenswrapper[4701]: E1121 19:52:31.954724 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:52:36 crc kubenswrapper[4701]: I1121 19:52:36.603706 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"6dd5f296-841e-4527-88fe-3963fef0e450","Type":"ContainerStarted","Data":"1b0522a64f939a6056e3229dbc55e26a865e231559bdefc50e4b89c5feada5a5"} Nov 21 19:52:36 crc kubenswrapper[4701]: I1121 19:52:36.652060 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest" podStartSLOduration=4.767032839 podStartE2EDuration="14.652031479s" podCreationTimestamp="2025-11-21 19:52:22 +0000 
UTC" firstStartedPulling="2025-11-21 19:52:24.968959946 +0000 UTC m=+3035.754099983" lastFinishedPulling="2025-11-21 19:52:34.853958596 +0000 UTC m=+3045.639098623" observedRunningTime="2025-11-21 19:52:36.636341404 +0000 UTC m=+3047.421481451" watchObservedRunningTime="2025-11-21 19:52:36.652031479 +0000 UTC m=+3047.437171546" Nov 21 19:52:45 crc kubenswrapper[4701]: I1121 19:52:45.954123 4701 scope.go:117] "RemoveContainer" containerID="ea89037636518203b6d07d6774a86e894d096cd957d648cc556a387001e667da" Nov 21 19:52:45 crc kubenswrapper[4701]: E1121 19:52:45.956411 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:52:57 crc kubenswrapper[4701]: I1121 19:52:57.952501 4701 scope.go:117] "RemoveContainer" containerID="ea89037636518203b6d07d6774a86e894d096cd957d648cc556a387001e667da" Nov 21 19:52:57 crc kubenswrapper[4701]: E1121 19:52:57.953831 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:53:10 crc kubenswrapper[4701]: I1121 19:53:10.952472 4701 scope.go:117] "RemoveContainer" containerID="ea89037636518203b6d07d6774a86e894d096cd957d648cc556a387001e667da" Nov 21 19:53:10 crc kubenswrapper[4701]: E1121 19:53:10.954092 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:53:21 crc kubenswrapper[4701]: I1121 19:53:21.951179 4701 scope.go:117] "RemoveContainer" containerID="ea89037636518203b6d07d6774a86e894d096cd957d648cc556a387001e667da" Nov 21 19:53:21 crc kubenswrapper[4701]: E1121 19:53:21.952454 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:53:33 crc kubenswrapper[4701]: I1121 19:53:33.951929 4701 scope.go:117] "RemoveContainer" containerID="ea89037636518203b6d07d6774a86e894d096cd957d648cc556a387001e667da" Nov 21 19:53:33 crc kubenswrapper[4701]: E1121 19:53:33.952944 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:53:46 crc kubenswrapper[4701]: I1121 19:53:46.951481 4701 scope.go:117] "RemoveContainer" containerID="ea89037636518203b6d07d6774a86e894d096cd957d648cc556a387001e667da" Nov 21 19:53:46 crc kubenswrapper[4701]: E1121 19:53:46.952562 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:53:57 crc kubenswrapper[4701]: I1121 19:53:57.953665 4701 scope.go:117] "RemoveContainer" containerID="ea89037636518203b6d07d6774a86e894d096cd957d648cc556a387001e667da" Nov 21 19:53:57 crc kubenswrapper[4701]: E1121 19:53:57.954733 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:54:11 crc kubenswrapper[4701]: I1121 19:54:10.999080 4701 scope.go:117] "RemoveContainer" containerID="05e953564e6107afc151eb5c64f52ef88464a1150f4629fcda5b7d000eaf49a7" Nov 21 19:54:11 crc kubenswrapper[4701]: I1121 19:54:11.080022 4701 scope.go:117] "RemoveContainer" containerID="83bf3770ae2a73218c5bf3b56cf54794ba1230fcf567c313281dd7a8ca94641c" Nov 21 19:54:11 crc kubenswrapper[4701]: I1121 19:54:11.129096 4701 scope.go:117] "RemoveContainer" containerID="c6ed90ecb4f49d313c8b9abba56e4a635b5c6403613764d22bbd1a6708a2f91e" Nov 21 19:54:12 crc kubenswrapper[4701]: I1121 19:54:12.953474 4701 scope.go:117] "RemoveContainer" containerID="ea89037636518203b6d07d6774a86e894d096cd957d648cc556a387001e667da" Nov 21 19:54:12 crc kubenswrapper[4701]: E1121 19:54:12.955064 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:54:25 crc kubenswrapper[4701]: I1121 19:54:25.951773 4701 scope.go:117] "RemoveContainer" containerID="ea89037636518203b6d07d6774a86e894d096cd957d648cc556a387001e667da" Nov 21 19:54:25 crc kubenswrapper[4701]: E1121 19:54:25.952753 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:54:40 crc kubenswrapper[4701]: I1121 19:54:40.952193 4701 scope.go:117] "RemoveContainer" 
containerID="ea89037636518203b6d07d6774a86e894d096cd957d648cc556a387001e667da" Nov 21 19:54:40 crc kubenswrapper[4701]: E1121 19:54:40.959071 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:54:55 crc kubenswrapper[4701]: I1121 19:54:55.951794 4701 scope.go:117] "RemoveContainer" containerID="ea89037636518203b6d07d6774a86e894d096cd957d648cc556a387001e667da" Nov 21 19:54:55 crc kubenswrapper[4701]: E1121 19:54:55.953180 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:55:06 crc kubenswrapper[4701]: I1121 19:55:06.951872 4701 scope.go:117] "RemoveContainer" containerID="ea89037636518203b6d07d6774a86e894d096cd957d648cc556a387001e667da" Nov 21 19:55:06 crc kubenswrapper[4701]: E1121 19:55:06.953888 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:55:18 crc kubenswrapper[4701]: I1121 19:55:18.951403 4701 scope.go:117] "RemoveContainer" containerID="ea89037636518203b6d07d6774a86e894d096cd957d648cc556a387001e667da" Nov 21 19:55:18 crc kubenswrapper[4701]: E1121 19:55:18.952612 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:55:33 crc kubenswrapper[4701]: I1121 19:55:33.952903 4701 scope.go:117] "RemoveContainer" containerID="ea89037636518203b6d07d6774a86e894d096cd957d648cc556a387001e667da" Nov 21 19:55:33 crc kubenswrapper[4701]: E1121 19:55:33.954689 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:55:44 crc kubenswrapper[4701]: I1121 19:55:44.951536 4701 scope.go:117] "RemoveContainer" containerID="ea89037636518203b6d07d6774a86e894d096cd957d648cc556a387001e667da" Nov 21 19:55:44 crc kubenswrapper[4701]: E1121 19:55:44.952523 4701 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:55:55 crc kubenswrapper[4701]: I1121 19:55:55.952497 4701 scope.go:117] "RemoveContainer" containerID="ea89037636518203b6d07d6774a86e894d096cd957d648cc556a387001e667da" Nov 21 19:55:55 crc kubenswrapper[4701]: E1121 19:55:55.953788 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:56:08 crc kubenswrapper[4701]: I1121 19:56:08.951675 4701 scope.go:117] "RemoveContainer" containerID="ea89037636518203b6d07d6774a86e894d096cd957d648cc556a387001e667da" Nov 21 19:56:08 crc kubenswrapper[4701]: E1121 19:56:08.952999 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 19:56:20 crc kubenswrapper[4701]: I1121 19:56:20.951906 4701 scope.go:117] "RemoveContainer" containerID="ea89037636518203b6d07d6774a86e894d096cd957d648cc556a387001e667da" Nov 21 19:56:21 crc kubenswrapper[4701]: I1121 19:56:21.651923 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerStarted","Data":"7d3324e3b46a4a77ff601e9e218cac0e8c546a519bd39a4ea54f051adbef5121"} Nov 21 19:57:31 crc kubenswrapper[4701]: I1121 19:57:31.076810 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-q7gb6"] Nov 21 19:57:31 crc kubenswrapper[4701]: I1121 19:57:31.081167 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-q7gb6" Nov 21 19:57:31 crc kubenswrapper[4701]: I1121 19:57:31.092906 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-q7gb6"] Nov 21 19:57:31 crc kubenswrapper[4701]: I1121 19:57:31.105767 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qlcmt\" (UniqueName: \"kubernetes.io/projected/26db3471-4df8-485f-b654-a1fa297662c8-kube-api-access-qlcmt\") pod \"certified-operators-q7gb6\" (UID: \"26db3471-4df8-485f-b654-a1fa297662c8\") " pod="openshift-marketplace/certified-operators-q7gb6" Nov 21 19:57:31 crc kubenswrapper[4701]: I1121 19:57:31.105813 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26db3471-4df8-485f-b654-a1fa297662c8-utilities\") pod \"certified-operators-q7gb6\" (UID: \"26db3471-4df8-485f-b654-a1fa297662c8\") " pod="openshift-marketplace/certified-operators-q7gb6" Nov 21 19:57:31 crc kubenswrapper[4701]: I1121 19:57:31.105965 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26db3471-4df8-485f-b654-a1fa297662c8-catalog-content\") pod \"certified-operators-q7gb6\" (UID: \"26db3471-4df8-485f-b654-a1fa297662c8\") " pod="openshift-marketplace/certified-operators-q7gb6" Nov 21 19:57:31 crc kubenswrapper[4701]: I1121 19:57:31.207637 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26db3471-4df8-485f-b654-a1fa297662c8-catalog-content\") pod \"certified-operators-q7gb6\" (UID: \"26db3471-4df8-485f-b654-a1fa297662c8\") " pod="openshift-marketplace/certified-operators-q7gb6" Nov 21 19:57:31 crc kubenswrapper[4701]: I1121 19:57:31.207701 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qlcmt\" (UniqueName: \"kubernetes.io/projected/26db3471-4df8-485f-b654-a1fa297662c8-kube-api-access-qlcmt\") pod \"certified-operators-q7gb6\" (UID: \"26db3471-4df8-485f-b654-a1fa297662c8\") " pod="openshift-marketplace/certified-operators-q7gb6" Nov 21 19:57:31 crc kubenswrapper[4701]: I1121 19:57:31.207731 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26db3471-4df8-485f-b654-a1fa297662c8-utilities\") pod \"certified-operators-q7gb6\" (UID: \"26db3471-4df8-485f-b654-a1fa297662c8\") " pod="openshift-marketplace/certified-operators-q7gb6" Nov 21 19:57:31 crc kubenswrapper[4701]: I1121 19:57:31.208344 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26db3471-4df8-485f-b654-a1fa297662c8-utilities\") pod \"certified-operators-q7gb6\" (UID: \"26db3471-4df8-485f-b654-a1fa297662c8\") " pod="openshift-marketplace/certified-operators-q7gb6" Nov 21 19:57:31 crc kubenswrapper[4701]: I1121 19:57:31.208926 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26db3471-4df8-485f-b654-a1fa297662c8-catalog-content\") pod \"certified-operators-q7gb6\" (UID: \"26db3471-4df8-485f-b654-a1fa297662c8\") " pod="openshift-marketplace/certified-operators-q7gb6" Nov 21 19:57:31 crc kubenswrapper[4701]: I1121 19:57:31.238372 4701 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-qlcmt\" (UniqueName: \"kubernetes.io/projected/26db3471-4df8-485f-b654-a1fa297662c8-kube-api-access-qlcmt\") pod \"certified-operators-q7gb6\" (UID: \"26db3471-4df8-485f-b654-a1fa297662c8\") " pod="openshift-marketplace/certified-operators-q7gb6" Nov 21 19:57:31 crc kubenswrapper[4701]: I1121 19:57:31.428841 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-q7gb6" Nov 21 19:57:31 crc kubenswrapper[4701]: I1121 19:57:31.904251 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-q7gb6"] Nov 21 19:57:32 crc kubenswrapper[4701]: I1121 19:57:32.668742 4701 generic.go:334] "Generic (PLEG): container finished" podID="26db3471-4df8-485f-b654-a1fa297662c8" containerID="61b34abe9136d53b3b46185d7dec401d55e4723c87322101af698541d43ede9d" exitCode=0 Nov 21 19:57:32 crc kubenswrapper[4701]: I1121 19:57:32.669111 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q7gb6" event={"ID":"26db3471-4df8-485f-b654-a1fa297662c8","Type":"ContainerDied","Data":"61b34abe9136d53b3b46185d7dec401d55e4723c87322101af698541d43ede9d"} Nov 21 19:57:32 crc kubenswrapper[4701]: I1121 19:57:32.669161 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q7gb6" event={"ID":"26db3471-4df8-485f-b654-a1fa297662c8","Type":"ContainerStarted","Data":"e96dad6982c8b0c63604df9d25e1b4f802118eb3b519aeba3556bf36f866fc71"} Nov 21 19:57:32 crc kubenswrapper[4701]: I1121 19:57:32.672711 4701 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 19:57:33 crc kubenswrapper[4701]: I1121 19:57:33.680740 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q7gb6" event={"ID":"26db3471-4df8-485f-b654-a1fa297662c8","Type":"ContainerStarted","Data":"c7cb11011cbbf28bcd50ebd5920b129e6994070c9931ccec02846725d9609813"} Nov 21 19:57:34 crc kubenswrapper[4701]: I1121 19:57:34.696050 4701 generic.go:334] "Generic (PLEG): container finished" podID="26db3471-4df8-485f-b654-a1fa297662c8" containerID="c7cb11011cbbf28bcd50ebd5920b129e6994070c9931ccec02846725d9609813" exitCode=0 Nov 21 19:57:34 crc kubenswrapper[4701]: I1121 19:57:34.696159 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q7gb6" event={"ID":"26db3471-4df8-485f-b654-a1fa297662c8","Type":"ContainerDied","Data":"c7cb11011cbbf28bcd50ebd5920b129e6994070c9931ccec02846725d9609813"} Nov 21 19:57:35 crc kubenswrapper[4701]: I1121 19:57:35.709342 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q7gb6" event={"ID":"26db3471-4df8-485f-b654-a1fa297662c8","Type":"ContainerStarted","Data":"e947a7932f3eaef7125c9539728d8f81c3b3b2b14707f68b72bf792c07de8ac1"} Nov 21 19:57:35 crc kubenswrapper[4701]: I1121 19:57:35.733177 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-q7gb6" podStartSLOduration=2.288807312 podStartE2EDuration="4.733152961s" podCreationTimestamp="2025-11-21 19:57:31 +0000 UTC" firstStartedPulling="2025-11-21 19:57:32.672300924 +0000 UTC m=+3343.457440981" lastFinishedPulling="2025-11-21 19:57:35.116646563 +0000 UTC m=+3345.901786630" observedRunningTime="2025-11-21 19:57:35.728416043 +0000 UTC m=+3346.513556070" watchObservedRunningTime="2025-11-21 
19:57:35.733152961 +0000 UTC m=+3346.518292988" Nov 21 19:57:41 crc kubenswrapper[4701]: I1121 19:57:41.430118 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-q7gb6" Nov 21 19:57:41 crc kubenswrapper[4701]: I1121 19:57:41.431004 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-q7gb6" Nov 21 19:57:41 crc kubenswrapper[4701]: I1121 19:57:41.522523 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-q7gb6" Nov 21 19:57:41 crc kubenswrapper[4701]: I1121 19:57:41.850403 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-q7gb6" Nov 21 19:57:41 crc kubenswrapper[4701]: I1121 19:57:41.923071 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-q7gb6"] Nov 21 19:57:43 crc kubenswrapper[4701]: I1121 19:57:43.809126 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-q7gb6" podUID="26db3471-4df8-485f-b654-a1fa297662c8" containerName="registry-server" containerID="cri-o://e947a7932f3eaef7125c9539728d8f81c3b3b2b14707f68b72bf792c07de8ac1" gracePeriod=2 Nov 21 19:57:44 crc kubenswrapper[4701]: I1121 19:57:44.404765 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-q7gb6" Nov 21 19:57:44 crc kubenswrapper[4701]: I1121 19:57:44.522702 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26db3471-4df8-485f-b654-a1fa297662c8-utilities\") pod \"26db3471-4df8-485f-b654-a1fa297662c8\" (UID: \"26db3471-4df8-485f-b654-a1fa297662c8\") " Nov 21 19:57:44 crc kubenswrapper[4701]: I1121 19:57:44.522766 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26db3471-4df8-485f-b654-a1fa297662c8-catalog-content\") pod \"26db3471-4df8-485f-b654-a1fa297662c8\" (UID: \"26db3471-4df8-485f-b654-a1fa297662c8\") " Nov 21 19:57:44 crc kubenswrapper[4701]: I1121 19:57:44.522863 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qlcmt\" (UniqueName: \"kubernetes.io/projected/26db3471-4df8-485f-b654-a1fa297662c8-kube-api-access-qlcmt\") pod \"26db3471-4df8-485f-b654-a1fa297662c8\" (UID: \"26db3471-4df8-485f-b654-a1fa297662c8\") " Nov 21 19:57:44 crc kubenswrapper[4701]: I1121 19:57:44.524399 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/26db3471-4df8-485f-b654-a1fa297662c8-utilities" (OuterVolumeSpecName: "utilities") pod "26db3471-4df8-485f-b654-a1fa297662c8" (UID: "26db3471-4df8-485f-b654-a1fa297662c8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:57:44 crc kubenswrapper[4701]: I1121 19:57:44.537836 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/26db3471-4df8-485f-b654-a1fa297662c8-kube-api-access-qlcmt" (OuterVolumeSpecName: "kube-api-access-qlcmt") pod "26db3471-4df8-485f-b654-a1fa297662c8" (UID: "26db3471-4df8-485f-b654-a1fa297662c8"). InnerVolumeSpecName "kube-api-access-qlcmt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:57:44 crc kubenswrapper[4701]: I1121 19:57:44.583538 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/26db3471-4df8-485f-b654-a1fa297662c8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "26db3471-4df8-485f-b654-a1fa297662c8" (UID: "26db3471-4df8-485f-b654-a1fa297662c8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:57:44 crc kubenswrapper[4701]: I1121 19:57:44.627058 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26db3471-4df8-485f-b654-a1fa297662c8-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 19:57:44 crc kubenswrapper[4701]: I1121 19:57:44.627122 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26db3471-4df8-485f-b654-a1fa297662c8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 19:57:44 crc kubenswrapper[4701]: I1121 19:57:44.627140 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qlcmt\" (UniqueName: \"kubernetes.io/projected/26db3471-4df8-485f-b654-a1fa297662c8-kube-api-access-qlcmt\") on node \"crc\" DevicePath \"\"" Nov 21 19:57:44 crc kubenswrapper[4701]: I1121 19:57:44.824741 4701 generic.go:334] "Generic (PLEG): container finished" podID="26db3471-4df8-485f-b654-a1fa297662c8" containerID="e947a7932f3eaef7125c9539728d8f81c3b3b2b14707f68b72bf792c07de8ac1" exitCode=0 Nov 21 19:57:44 crc kubenswrapper[4701]: I1121 19:57:44.824788 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q7gb6" event={"ID":"26db3471-4df8-485f-b654-a1fa297662c8","Type":"ContainerDied","Data":"e947a7932f3eaef7125c9539728d8f81c3b3b2b14707f68b72bf792c07de8ac1"} Nov 21 19:57:44 crc kubenswrapper[4701]: I1121 19:57:44.824939 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-q7gb6" Nov 21 19:57:44 crc kubenswrapper[4701]: I1121 19:57:44.825175 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q7gb6" event={"ID":"26db3471-4df8-485f-b654-a1fa297662c8","Type":"ContainerDied","Data":"e96dad6982c8b0c63604df9d25e1b4f802118eb3b519aeba3556bf36f866fc71"} Nov 21 19:57:44 crc kubenswrapper[4701]: I1121 19:57:44.824937 4701 scope.go:117] "RemoveContainer" containerID="e947a7932f3eaef7125c9539728d8f81c3b3b2b14707f68b72bf792c07de8ac1" Nov 21 19:57:44 crc kubenswrapper[4701]: I1121 19:57:44.875915 4701 scope.go:117] "RemoveContainer" containerID="c7cb11011cbbf28bcd50ebd5920b129e6994070c9931ccec02846725d9609813" Nov 21 19:57:44 crc kubenswrapper[4701]: I1121 19:57:44.888967 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-q7gb6"] Nov 21 19:57:44 crc kubenswrapper[4701]: I1121 19:57:44.899481 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-q7gb6"] Nov 21 19:57:44 crc kubenswrapper[4701]: I1121 19:57:44.918120 4701 scope.go:117] "RemoveContainer" containerID="61b34abe9136d53b3b46185d7dec401d55e4723c87322101af698541d43ede9d" Nov 21 19:57:44 crc kubenswrapper[4701]: I1121 19:57:44.981080 4701 scope.go:117] "RemoveContainer" containerID="e947a7932f3eaef7125c9539728d8f81c3b3b2b14707f68b72bf792c07de8ac1" Nov 21 19:57:44 crc kubenswrapper[4701]: E1121 19:57:44.981887 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e947a7932f3eaef7125c9539728d8f81c3b3b2b14707f68b72bf792c07de8ac1\": container with ID starting with e947a7932f3eaef7125c9539728d8f81c3b3b2b14707f68b72bf792c07de8ac1 not found: ID does not exist" containerID="e947a7932f3eaef7125c9539728d8f81c3b3b2b14707f68b72bf792c07de8ac1" Nov 21 19:57:44 crc kubenswrapper[4701]: I1121 19:57:44.981952 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e947a7932f3eaef7125c9539728d8f81c3b3b2b14707f68b72bf792c07de8ac1"} err="failed to get container status \"e947a7932f3eaef7125c9539728d8f81c3b3b2b14707f68b72bf792c07de8ac1\": rpc error: code = NotFound desc = could not find container \"e947a7932f3eaef7125c9539728d8f81c3b3b2b14707f68b72bf792c07de8ac1\": container with ID starting with e947a7932f3eaef7125c9539728d8f81c3b3b2b14707f68b72bf792c07de8ac1 not found: ID does not exist" Nov 21 19:57:44 crc kubenswrapper[4701]: I1121 19:57:44.981991 4701 scope.go:117] "RemoveContainer" containerID="c7cb11011cbbf28bcd50ebd5920b129e6994070c9931ccec02846725d9609813" Nov 21 19:57:44 crc kubenswrapper[4701]: E1121 19:57:44.982496 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c7cb11011cbbf28bcd50ebd5920b129e6994070c9931ccec02846725d9609813\": container with ID starting with c7cb11011cbbf28bcd50ebd5920b129e6994070c9931ccec02846725d9609813 not found: ID does not exist" containerID="c7cb11011cbbf28bcd50ebd5920b129e6994070c9931ccec02846725d9609813" Nov 21 19:57:44 crc kubenswrapper[4701]: I1121 19:57:44.982554 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c7cb11011cbbf28bcd50ebd5920b129e6994070c9931ccec02846725d9609813"} err="failed to get container status \"c7cb11011cbbf28bcd50ebd5920b129e6994070c9931ccec02846725d9609813\": rpc error: code = NotFound desc = could not find 
container \"c7cb11011cbbf28bcd50ebd5920b129e6994070c9931ccec02846725d9609813\": container with ID starting with c7cb11011cbbf28bcd50ebd5920b129e6994070c9931ccec02846725d9609813 not found: ID does not exist" Nov 21 19:57:44 crc kubenswrapper[4701]: I1121 19:57:44.982589 4701 scope.go:117] "RemoveContainer" containerID="61b34abe9136d53b3b46185d7dec401d55e4723c87322101af698541d43ede9d" Nov 21 19:57:44 crc kubenswrapper[4701]: E1121 19:57:44.984567 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"61b34abe9136d53b3b46185d7dec401d55e4723c87322101af698541d43ede9d\": container with ID starting with 61b34abe9136d53b3b46185d7dec401d55e4723c87322101af698541d43ede9d not found: ID does not exist" containerID="61b34abe9136d53b3b46185d7dec401d55e4723c87322101af698541d43ede9d" Nov 21 19:57:44 crc kubenswrapper[4701]: I1121 19:57:44.984634 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61b34abe9136d53b3b46185d7dec401d55e4723c87322101af698541d43ede9d"} err="failed to get container status \"61b34abe9136d53b3b46185d7dec401d55e4723c87322101af698541d43ede9d\": rpc error: code = NotFound desc = could not find container \"61b34abe9136d53b3b46185d7dec401d55e4723c87322101af698541d43ede9d\": container with ID starting with 61b34abe9136d53b3b46185d7dec401d55e4723c87322101af698541d43ede9d not found: ID does not exist" Nov 21 19:57:45 crc kubenswrapper[4701]: I1121 19:57:45.969508 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="26db3471-4df8-485f-b654-a1fa297662c8" path="/var/lib/kubelet/pods/26db3471-4df8-485f-b654-a1fa297662c8/volumes" Nov 21 19:57:51 crc kubenswrapper[4701]: I1121 19:57:51.023098 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-fx5l6"] Nov 21 19:57:51 crc kubenswrapper[4701]: E1121 19:57:51.024783 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26db3471-4df8-485f-b654-a1fa297662c8" containerName="extract-content" Nov 21 19:57:51 crc kubenswrapper[4701]: I1121 19:57:51.024805 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="26db3471-4df8-485f-b654-a1fa297662c8" containerName="extract-content" Nov 21 19:57:51 crc kubenswrapper[4701]: E1121 19:57:51.024880 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26db3471-4df8-485f-b654-a1fa297662c8" containerName="registry-server" Nov 21 19:57:51 crc kubenswrapper[4701]: I1121 19:57:51.024892 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="26db3471-4df8-485f-b654-a1fa297662c8" containerName="registry-server" Nov 21 19:57:51 crc kubenswrapper[4701]: E1121 19:57:51.024913 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26db3471-4df8-485f-b654-a1fa297662c8" containerName="extract-utilities" Nov 21 19:57:51 crc kubenswrapper[4701]: I1121 19:57:51.024926 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="26db3471-4df8-485f-b654-a1fa297662c8" containerName="extract-utilities" Nov 21 19:57:51 crc kubenswrapper[4701]: I1121 19:57:51.025324 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="26db3471-4df8-485f-b654-a1fa297662c8" containerName="registry-server" Nov 21 19:57:51 crc kubenswrapper[4701]: I1121 19:57:51.028163 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-fx5l6" Nov 21 19:57:51 crc kubenswrapper[4701]: I1121 19:57:51.055417 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fx5l6"] Nov 21 19:57:51 crc kubenswrapper[4701]: I1121 19:57:51.125152 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a-catalog-content\") pod \"redhat-operators-fx5l6\" (UID: \"ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a\") " pod="openshift-marketplace/redhat-operators-fx5l6" Nov 21 19:57:51 crc kubenswrapper[4701]: I1121 19:57:51.125285 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a-utilities\") pod \"redhat-operators-fx5l6\" (UID: \"ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a\") " pod="openshift-marketplace/redhat-operators-fx5l6" Nov 21 19:57:51 crc kubenswrapper[4701]: I1121 19:57:51.125877 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wl6rr\" (UniqueName: \"kubernetes.io/projected/ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a-kube-api-access-wl6rr\") pod \"redhat-operators-fx5l6\" (UID: \"ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a\") " pod="openshift-marketplace/redhat-operators-fx5l6" Nov 21 19:57:51 crc kubenswrapper[4701]: I1121 19:57:51.228608 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a-catalog-content\") pod \"redhat-operators-fx5l6\" (UID: \"ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a\") " pod="openshift-marketplace/redhat-operators-fx5l6" Nov 21 19:57:51 crc kubenswrapper[4701]: I1121 19:57:51.228694 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a-utilities\") pod \"redhat-operators-fx5l6\" (UID: \"ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a\") " pod="openshift-marketplace/redhat-operators-fx5l6" Nov 21 19:57:51 crc kubenswrapper[4701]: I1121 19:57:51.228733 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wl6rr\" (UniqueName: \"kubernetes.io/projected/ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a-kube-api-access-wl6rr\") pod \"redhat-operators-fx5l6\" (UID: \"ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a\") " pod="openshift-marketplace/redhat-operators-fx5l6" Nov 21 19:57:51 crc kubenswrapper[4701]: I1121 19:57:51.229602 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a-utilities\") pod \"redhat-operators-fx5l6\" (UID: \"ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a\") " pod="openshift-marketplace/redhat-operators-fx5l6" Nov 21 19:57:51 crc kubenswrapper[4701]: I1121 19:57:51.229940 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a-catalog-content\") pod \"redhat-operators-fx5l6\" (UID: \"ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a\") " pod="openshift-marketplace/redhat-operators-fx5l6" Nov 21 19:57:51 crc kubenswrapper[4701]: I1121 19:57:51.251644 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-wl6rr\" (UniqueName: \"kubernetes.io/projected/ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a-kube-api-access-wl6rr\") pod \"redhat-operators-fx5l6\" (UID: \"ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a\") " pod="openshift-marketplace/redhat-operators-fx5l6" Nov 21 19:57:51 crc kubenswrapper[4701]: I1121 19:57:51.374635 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fx5l6" Nov 21 19:57:51 crc kubenswrapper[4701]: I1121 19:57:51.906264 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fx5l6"] Nov 21 19:57:51 crc kubenswrapper[4701]: I1121 19:57:51.937795 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fx5l6" event={"ID":"ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a","Type":"ContainerStarted","Data":"305c75d334f141f51900e00cab5c07ec8b7a3bd456fadf9888b70e90c4a68ca1"} Nov 21 19:57:52 crc kubenswrapper[4701]: I1121 19:57:52.956648 4701 generic.go:334] "Generic (PLEG): container finished" podID="ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a" containerID="41c28142c886761f7a849b7f5049f3fe356fcec901202218712c4cf08cba6315" exitCode=0 Nov 21 19:57:52 crc kubenswrapper[4701]: I1121 19:57:52.956971 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fx5l6" event={"ID":"ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a","Type":"ContainerDied","Data":"41c28142c886761f7a849b7f5049f3fe356fcec901202218712c4cf08cba6315"} Nov 21 19:57:53 crc kubenswrapper[4701]: I1121 19:57:53.972494 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fx5l6" event={"ID":"ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a","Type":"ContainerStarted","Data":"18bcdbdb0c29eea016803d22310ba07bd0f666c3a65dde836e8129ceba03372b"} Nov 21 19:57:58 crc kubenswrapper[4701]: I1121 19:57:58.020949 4701 generic.go:334] "Generic (PLEG): container finished" podID="ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a" containerID="18bcdbdb0c29eea016803d22310ba07bd0f666c3a65dde836e8129ceba03372b" exitCode=0 Nov 21 19:57:58 crc kubenswrapper[4701]: I1121 19:57:58.021776 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fx5l6" event={"ID":"ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a","Type":"ContainerDied","Data":"18bcdbdb0c29eea016803d22310ba07bd0f666c3a65dde836e8129ceba03372b"} Nov 21 19:58:03 crc kubenswrapper[4701]: I1121 19:58:03.088893 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fx5l6" event={"ID":"ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a","Type":"ContainerStarted","Data":"c1fde5dd31ce125d47856b16febabcda1e6d4d3de0f7ce5a44dcd8eb01d1e1d1"} Nov 21 19:58:03 crc kubenswrapper[4701]: I1121 19:58:03.115766 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-fx5l6" podStartSLOduration=3.584328992 podStartE2EDuration="13.11573848s" podCreationTimestamp="2025-11-21 19:57:50 +0000 UTC" firstStartedPulling="2025-11-21 19:57:52.965711265 +0000 UTC m=+3363.750851322" lastFinishedPulling="2025-11-21 19:58:02.497120743 +0000 UTC m=+3373.282260810" observedRunningTime="2025-11-21 19:58:03.111977738 +0000 UTC m=+3373.897117765" watchObservedRunningTime="2025-11-21 19:58:03.11573848 +0000 UTC m=+3373.900878537" Nov 21 19:58:11 crc kubenswrapper[4701]: I1121 19:58:11.375080 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-fx5l6" Nov 
21 19:58:11 crc kubenswrapper[4701]: I1121 19:58:11.376295 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-fx5l6" Nov 21 19:58:11 crc kubenswrapper[4701]: I1121 19:58:11.459398 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-fx5l6" Nov 21 19:58:12 crc kubenswrapper[4701]: I1121 19:58:12.271815 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-fx5l6" Nov 21 19:58:12 crc kubenswrapper[4701]: I1121 19:58:12.346221 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fx5l6"] Nov 21 19:58:14 crc kubenswrapper[4701]: I1121 19:58:14.212258 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-fx5l6" podUID="ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a" containerName="registry-server" containerID="cri-o://c1fde5dd31ce125d47856b16febabcda1e6d4d3de0f7ce5a44dcd8eb01d1e1d1" gracePeriod=2 Nov 21 19:58:14 crc kubenswrapper[4701]: I1121 19:58:14.853966 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fx5l6" Nov 21 19:58:14 crc kubenswrapper[4701]: I1121 19:58:14.906452 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wl6rr\" (UniqueName: \"kubernetes.io/projected/ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a-kube-api-access-wl6rr\") pod \"ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a\" (UID: \"ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a\") " Nov 21 19:58:14 crc kubenswrapper[4701]: I1121 19:58:14.906800 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a-catalog-content\") pod \"ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a\" (UID: \"ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a\") " Nov 21 19:58:14 crc kubenswrapper[4701]: I1121 19:58:14.906903 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a-utilities\") pod \"ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a\" (UID: \"ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a\") " Nov 21 19:58:14 crc kubenswrapper[4701]: I1121 19:58:14.908072 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a-utilities" (OuterVolumeSpecName: "utilities") pod "ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a" (UID: "ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:58:14 crc kubenswrapper[4701]: I1121 19:58:14.925403 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a-kube-api-access-wl6rr" (OuterVolumeSpecName: "kube-api-access-wl6rr") pod "ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a" (UID: "ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a"). InnerVolumeSpecName "kube-api-access-wl6rr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 19:58:15 crc kubenswrapper[4701]: I1121 19:58:15.010117 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a" (UID: "ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 19:58:15 crc kubenswrapper[4701]: I1121 19:58:15.012430 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 19:58:15 crc kubenswrapper[4701]: I1121 19:58:15.012578 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wl6rr\" (UniqueName: \"kubernetes.io/projected/ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a-kube-api-access-wl6rr\") on node \"crc\" DevicePath \"\"" Nov 21 19:58:15 crc kubenswrapper[4701]: I1121 19:58:15.012672 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 19:58:15 crc kubenswrapper[4701]: I1121 19:58:15.230745 4701 generic.go:334] "Generic (PLEG): container finished" podID="ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a" containerID="c1fde5dd31ce125d47856b16febabcda1e6d4d3de0f7ce5a44dcd8eb01d1e1d1" exitCode=0 Nov 21 19:58:15 crc kubenswrapper[4701]: I1121 19:58:15.230829 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fx5l6" event={"ID":"ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a","Type":"ContainerDied","Data":"c1fde5dd31ce125d47856b16febabcda1e6d4d3de0f7ce5a44dcd8eb01d1e1d1"} Nov 21 19:58:15 crc kubenswrapper[4701]: I1121 19:58:15.230856 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-fx5l6" Nov 21 19:58:15 crc kubenswrapper[4701]: I1121 19:58:15.230885 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fx5l6" event={"ID":"ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a","Type":"ContainerDied","Data":"305c75d334f141f51900e00cab5c07ec8b7a3bd456fadf9888b70e90c4a68ca1"} Nov 21 19:58:15 crc kubenswrapper[4701]: I1121 19:58:15.230922 4701 scope.go:117] "RemoveContainer" containerID="c1fde5dd31ce125d47856b16febabcda1e6d4d3de0f7ce5a44dcd8eb01d1e1d1" Nov 21 19:58:15 crc kubenswrapper[4701]: I1121 19:58:15.260934 4701 scope.go:117] "RemoveContainer" containerID="18bcdbdb0c29eea016803d22310ba07bd0f666c3a65dde836e8129ceba03372b" Nov 21 19:58:15 crc kubenswrapper[4701]: I1121 19:58:15.293503 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fx5l6"] Nov 21 19:58:15 crc kubenswrapper[4701]: I1121 19:58:15.305327 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-fx5l6"] Nov 21 19:58:15 crc kubenswrapper[4701]: I1121 19:58:15.334785 4701 scope.go:117] "RemoveContainer" containerID="41c28142c886761f7a849b7f5049f3fe356fcec901202218712c4cf08cba6315" Nov 21 19:58:15 crc kubenswrapper[4701]: I1121 19:58:15.374277 4701 scope.go:117] "RemoveContainer" containerID="c1fde5dd31ce125d47856b16febabcda1e6d4d3de0f7ce5a44dcd8eb01d1e1d1" Nov 21 19:58:15 crc kubenswrapper[4701]: E1121 19:58:15.375044 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c1fde5dd31ce125d47856b16febabcda1e6d4d3de0f7ce5a44dcd8eb01d1e1d1\": container with ID starting with c1fde5dd31ce125d47856b16febabcda1e6d4d3de0f7ce5a44dcd8eb01d1e1d1 not found: ID does not exist" containerID="c1fde5dd31ce125d47856b16febabcda1e6d4d3de0f7ce5a44dcd8eb01d1e1d1" Nov 21 19:58:15 crc kubenswrapper[4701]: I1121 19:58:15.375143 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c1fde5dd31ce125d47856b16febabcda1e6d4d3de0f7ce5a44dcd8eb01d1e1d1"} err="failed to get container status \"c1fde5dd31ce125d47856b16febabcda1e6d4d3de0f7ce5a44dcd8eb01d1e1d1\": rpc error: code = NotFound desc = could not find container \"c1fde5dd31ce125d47856b16febabcda1e6d4d3de0f7ce5a44dcd8eb01d1e1d1\": container with ID starting with c1fde5dd31ce125d47856b16febabcda1e6d4d3de0f7ce5a44dcd8eb01d1e1d1 not found: ID does not exist" Nov 21 19:58:15 crc kubenswrapper[4701]: I1121 19:58:15.375180 4701 scope.go:117] "RemoveContainer" containerID="18bcdbdb0c29eea016803d22310ba07bd0f666c3a65dde836e8129ceba03372b" Nov 21 19:58:15 crc kubenswrapper[4701]: E1121 19:58:15.375817 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"18bcdbdb0c29eea016803d22310ba07bd0f666c3a65dde836e8129ceba03372b\": container with ID starting with 18bcdbdb0c29eea016803d22310ba07bd0f666c3a65dde836e8129ceba03372b not found: ID does not exist" containerID="18bcdbdb0c29eea016803d22310ba07bd0f666c3a65dde836e8129ceba03372b" Nov 21 19:58:15 crc kubenswrapper[4701]: I1121 19:58:15.375859 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18bcdbdb0c29eea016803d22310ba07bd0f666c3a65dde836e8129ceba03372b"} err="failed to get container status \"18bcdbdb0c29eea016803d22310ba07bd0f666c3a65dde836e8129ceba03372b\": rpc error: code = NotFound desc = could not find container 
\"18bcdbdb0c29eea016803d22310ba07bd0f666c3a65dde836e8129ceba03372b\": container with ID starting with 18bcdbdb0c29eea016803d22310ba07bd0f666c3a65dde836e8129ceba03372b not found: ID does not exist" Nov 21 19:58:15 crc kubenswrapper[4701]: I1121 19:58:15.375889 4701 scope.go:117] "RemoveContainer" containerID="41c28142c886761f7a849b7f5049f3fe356fcec901202218712c4cf08cba6315" Nov 21 19:58:15 crc kubenswrapper[4701]: E1121 19:58:15.376144 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"41c28142c886761f7a849b7f5049f3fe356fcec901202218712c4cf08cba6315\": container with ID starting with 41c28142c886761f7a849b7f5049f3fe356fcec901202218712c4cf08cba6315 not found: ID does not exist" containerID="41c28142c886761f7a849b7f5049f3fe356fcec901202218712c4cf08cba6315" Nov 21 19:58:15 crc kubenswrapper[4701]: I1121 19:58:15.376164 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"41c28142c886761f7a849b7f5049f3fe356fcec901202218712c4cf08cba6315"} err="failed to get container status \"41c28142c886761f7a849b7f5049f3fe356fcec901202218712c4cf08cba6315\": rpc error: code = NotFound desc = could not find container \"41c28142c886761f7a849b7f5049f3fe356fcec901202218712c4cf08cba6315\": container with ID starting with 41c28142c886761f7a849b7f5049f3fe356fcec901202218712c4cf08cba6315 not found: ID does not exist" Nov 21 19:58:15 crc kubenswrapper[4701]: I1121 19:58:15.971653 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a" path="/var/lib/kubelet/pods/ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a/volumes" Nov 21 19:58:48 crc kubenswrapper[4701]: I1121 19:58:48.613464 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 19:58:48 crc kubenswrapper[4701]: I1121 19:58:48.614447 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 19:59:18 crc kubenswrapper[4701]: I1121 19:59:18.614108 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 19:59:18 crc kubenswrapper[4701]: I1121 19:59:18.615062 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 19:59:48 crc kubenswrapper[4701]: I1121 19:59:48.613572 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 19:59:48 crc 
kubenswrapper[4701]: I1121 19:59:48.614824 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 19:59:48 crc kubenswrapper[4701]: I1121 19:59:48.614923 4701 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" Nov 21 19:59:48 crc kubenswrapper[4701]: I1121 19:59:48.616474 4701 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7d3324e3b46a4a77ff601e9e218cac0e8c546a519bd39a4ea54f051adbef5121"} pod="openshift-machine-config-operator/machine-config-daemon-tbszf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 19:59:48 crc kubenswrapper[4701]: I1121 19:59:48.616586 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" containerID="cri-o://7d3324e3b46a4a77ff601e9e218cac0e8c546a519bd39a4ea54f051adbef5121" gracePeriod=600 Nov 21 19:59:49 crc kubenswrapper[4701]: I1121 19:59:49.571792 4701 generic.go:334] "Generic (PLEG): container finished" podID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerID="7d3324e3b46a4a77ff601e9e218cac0e8c546a519bd39a4ea54f051adbef5121" exitCode=0 Nov 21 19:59:49 crc kubenswrapper[4701]: I1121 19:59:49.571916 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerDied","Data":"7d3324e3b46a4a77ff601e9e218cac0e8c546a519bd39a4ea54f051adbef5121"} Nov 21 19:59:49 crc kubenswrapper[4701]: I1121 19:59:49.572874 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerStarted","Data":"7e1dcf3e5eec58d800cf03f8bc8e23257bc0c39c02d454c55e79aa0ba3adf8a7"} Nov 21 19:59:49 crc kubenswrapper[4701]: I1121 19:59:49.572914 4701 scope.go:117] "RemoveContainer" containerID="ea89037636518203b6d07d6774a86e894d096cd957d648cc556a387001e667da" Nov 21 20:00:00 crc kubenswrapper[4701]: I1121 20:00:00.226035 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395920-jhddz"] Nov 21 20:00:00 crc kubenswrapper[4701]: E1121 20:00:00.227675 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a" containerName="extract-content" Nov 21 20:00:00 crc kubenswrapper[4701]: I1121 20:00:00.227703 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a" containerName="extract-content" Nov 21 20:00:00 crc kubenswrapper[4701]: E1121 20:00:00.227749 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a" containerName="extract-utilities" Nov 21 20:00:00 crc kubenswrapper[4701]: I1121 20:00:00.227761 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a" containerName="extract-utilities" Nov 21 20:00:00 crc kubenswrapper[4701]: E1121 
20:00:00.227785 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a" containerName="registry-server" Nov 21 20:00:00 crc kubenswrapper[4701]: I1121 20:00:00.227801 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a" containerName="registry-server" Nov 21 20:00:00 crc kubenswrapper[4701]: I1121 20:00:00.228174 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae38cf70-4f68-4bdf-a0d4-7c7cbe51bb5a" containerName="registry-server" Nov 21 20:00:00 crc kubenswrapper[4701]: I1121 20:00:00.229647 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395920-jhddz" Nov 21 20:00:00 crc kubenswrapper[4701]: I1121 20:00:00.232453 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 21 20:00:00 crc kubenswrapper[4701]: I1121 20:00:00.235821 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 21 20:00:00 crc kubenswrapper[4701]: I1121 20:00:00.239257 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395920-jhddz"] Nov 21 20:00:00 crc kubenswrapper[4701]: I1121 20:00:00.338297 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gl7sj\" (UniqueName: \"kubernetes.io/projected/9f3a1f12-3ef6-41f0-9290-273103bcac4f-kube-api-access-gl7sj\") pod \"collect-profiles-29395920-jhddz\" (UID: \"9f3a1f12-3ef6-41f0-9290-273103bcac4f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395920-jhddz" Nov 21 20:00:00 crc kubenswrapper[4701]: I1121 20:00:00.338390 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9f3a1f12-3ef6-41f0-9290-273103bcac4f-config-volume\") pod \"collect-profiles-29395920-jhddz\" (UID: \"9f3a1f12-3ef6-41f0-9290-273103bcac4f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395920-jhddz" Nov 21 20:00:00 crc kubenswrapper[4701]: I1121 20:00:00.339130 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9f3a1f12-3ef6-41f0-9290-273103bcac4f-secret-volume\") pod \"collect-profiles-29395920-jhddz\" (UID: \"9f3a1f12-3ef6-41f0-9290-273103bcac4f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395920-jhddz" Nov 21 20:00:00 crc kubenswrapper[4701]: I1121 20:00:00.442287 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9f3a1f12-3ef6-41f0-9290-273103bcac4f-config-volume\") pod \"collect-profiles-29395920-jhddz\" (UID: \"9f3a1f12-3ef6-41f0-9290-273103bcac4f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395920-jhddz" Nov 21 20:00:00 crc kubenswrapper[4701]: I1121 20:00:00.442839 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9f3a1f12-3ef6-41f0-9290-273103bcac4f-secret-volume\") pod \"collect-profiles-29395920-jhddz\" (UID: \"9f3a1f12-3ef6-41f0-9290-273103bcac4f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395920-jhddz" Nov 21 20:00:00 crc 
kubenswrapper[4701]: I1121 20:00:00.443006 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gl7sj\" (UniqueName: \"kubernetes.io/projected/9f3a1f12-3ef6-41f0-9290-273103bcac4f-kube-api-access-gl7sj\") pod \"collect-profiles-29395920-jhddz\" (UID: \"9f3a1f12-3ef6-41f0-9290-273103bcac4f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395920-jhddz" Nov 21 20:00:00 crc kubenswrapper[4701]: I1121 20:00:00.444377 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9f3a1f12-3ef6-41f0-9290-273103bcac4f-config-volume\") pod \"collect-profiles-29395920-jhddz\" (UID: \"9f3a1f12-3ef6-41f0-9290-273103bcac4f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395920-jhddz" Nov 21 20:00:00 crc kubenswrapper[4701]: I1121 20:00:00.454104 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9f3a1f12-3ef6-41f0-9290-273103bcac4f-secret-volume\") pod \"collect-profiles-29395920-jhddz\" (UID: \"9f3a1f12-3ef6-41f0-9290-273103bcac4f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395920-jhddz" Nov 21 20:00:00 crc kubenswrapper[4701]: I1121 20:00:00.474180 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gl7sj\" (UniqueName: \"kubernetes.io/projected/9f3a1f12-3ef6-41f0-9290-273103bcac4f-kube-api-access-gl7sj\") pod \"collect-profiles-29395920-jhddz\" (UID: \"9f3a1f12-3ef6-41f0-9290-273103bcac4f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395920-jhddz" Nov 21 20:00:00 crc kubenswrapper[4701]: I1121 20:00:00.560248 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395920-jhddz" Nov 21 20:00:01 crc kubenswrapper[4701]: I1121 20:00:01.120827 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395920-jhddz"] Nov 21 20:00:01 crc kubenswrapper[4701]: I1121 20:00:01.768435 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395920-jhddz" event={"ID":"9f3a1f12-3ef6-41f0-9290-273103bcac4f","Type":"ContainerStarted","Data":"0c0c7d5a057b6bb2a6b7979c93c402ebd676f99ce76988ad488eab4ace449bb2"} Nov 21 20:00:01 crc kubenswrapper[4701]: I1121 20:00:01.768504 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395920-jhddz" event={"ID":"9f3a1f12-3ef6-41f0-9290-273103bcac4f","Type":"ContainerStarted","Data":"fdd21b8601f01eedd7aac056dfe94561aca09321f7357af74319714e063f22f7"} Nov 21 20:00:01 crc kubenswrapper[4701]: I1121 20:00:01.797943 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29395920-jhddz" podStartSLOduration=1.7979140230000001 podStartE2EDuration="1.797914023s" podCreationTimestamp="2025-11-21 20:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 20:00:01.790930204 +0000 UTC m=+3492.576070241" watchObservedRunningTime="2025-11-21 20:00:01.797914023 +0000 UTC m=+3492.583054090" Nov 21 20:00:02 crc kubenswrapper[4701]: I1121 20:00:02.790026 4701 generic.go:334] "Generic (PLEG): container finished" podID="9f3a1f12-3ef6-41f0-9290-273103bcac4f" containerID="0c0c7d5a057b6bb2a6b7979c93c402ebd676f99ce76988ad488eab4ace449bb2" exitCode=0 Nov 21 20:00:02 crc kubenswrapper[4701]: I1121 20:00:02.790343 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395920-jhddz" event={"ID":"9f3a1f12-3ef6-41f0-9290-273103bcac4f","Type":"ContainerDied","Data":"0c0c7d5a057b6bb2a6b7979c93c402ebd676f99ce76988ad488eab4ace449bb2"} Nov 21 20:00:04 crc kubenswrapper[4701]: I1121 20:00:04.252396 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395920-jhddz" Nov 21 20:00:04 crc kubenswrapper[4701]: I1121 20:00:04.378454 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gl7sj\" (UniqueName: \"kubernetes.io/projected/9f3a1f12-3ef6-41f0-9290-273103bcac4f-kube-api-access-gl7sj\") pod \"9f3a1f12-3ef6-41f0-9290-273103bcac4f\" (UID: \"9f3a1f12-3ef6-41f0-9290-273103bcac4f\") " Nov 21 20:00:04 crc kubenswrapper[4701]: I1121 20:00:04.378820 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9f3a1f12-3ef6-41f0-9290-273103bcac4f-config-volume\") pod \"9f3a1f12-3ef6-41f0-9290-273103bcac4f\" (UID: \"9f3a1f12-3ef6-41f0-9290-273103bcac4f\") " Nov 21 20:00:04 crc kubenswrapper[4701]: I1121 20:00:04.379067 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9f3a1f12-3ef6-41f0-9290-273103bcac4f-secret-volume\") pod \"9f3a1f12-3ef6-41f0-9290-273103bcac4f\" (UID: \"9f3a1f12-3ef6-41f0-9290-273103bcac4f\") " Nov 21 20:00:04 crc kubenswrapper[4701]: I1121 20:00:04.379800 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9f3a1f12-3ef6-41f0-9290-273103bcac4f-config-volume" (OuterVolumeSpecName: "config-volume") pod "9f3a1f12-3ef6-41f0-9290-273103bcac4f" (UID: "9f3a1f12-3ef6-41f0-9290-273103bcac4f"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 20:00:04 crc kubenswrapper[4701]: I1121 20:00:04.386738 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f3a1f12-3ef6-41f0-9290-273103bcac4f-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "9f3a1f12-3ef6-41f0-9290-273103bcac4f" (UID: "9f3a1f12-3ef6-41f0-9290-273103bcac4f"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 20:00:04 crc kubenswrapper[4701]: I1121 20:00:04.393366 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f3a1f12-3ef6-41f0-9290-273103bcac4f-kube-api-access-gl7sj" (OuterVolumeSpecName: "kube-api-access-gl7sj") pod "9f3a1f12-3ef6-41f0-9290-273103bcac4f" (UID: "9f3a1f12-3ef6-41f0-9290-273103bcac4f"). InnerVolumeSpecName "kube-api-access-gl7sj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 20:00:04 crc kubenswrapper[4701]: I1121 20:00:04.482039 4701 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9f3a1f12-3ef6-41f0-9290-273103bcac4f-config-volume\") on node \"crc\" DevicePath \"\"" Nov 21 20:00:04 crc kubenswrapper[4701]: I1121 20:00:04.482076 4701 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9f3a1f12-3ef6-41f0-9290-273103bcac4f-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 21 20:00:04 crc kubenswrapper[4701]: I1121 20:00:04.482086 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gl7sj\" (UniqueName: \"kubernetes.io/projected/9f3a1f12-3ef6-41f0-9290-273103bcac4f-kube-api-access-gl7sj\") on node \"crc\" DevicePath \"\"" Nov 21 20:00:04 crc kubenswrapper[4701]: I1121 20:00:04.820815 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395920-jhddz" event={"ID":"9f3a1f12-3ef6-41f0-9290-273103bcac4f","Type":"ContainerDied","Data":"fdd21b8601f01eedd7aac056dfe94561aca09321f7357af74319714e063f22f7"} Nov 21 20:00:04 crc kubenswrapper[4701]: I1121 20:00:04.820865 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fdd21b8601f01eedd7aac056dfe94561aca09321f7357af74319714e063f22f7" Nov 21 20:00:04 crc kubenswrapper[4701]: I1121 20:00:04.820933 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395920-jhddz" Nov 21 20:00:04 crc kubenswrapper[4701]: I1121 20:00:04.913881 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395875-xjspq"] Nov 21 20:00:04 crc kubenswrapper[4701]: I1121 20:00:04.930519 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395875-xjspq"] Nov 21 20:00:05 crc kubenswrapper[4701]: I1121 20:00:05.975343 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49b5f1c9-9af4-4f31-bc1b-187255d3c54d" path="/var/lib/kubelet/pods/49b5f1c9-9af4-4f31-bc1b-187255d3c54d/volumes" Nov 21 20:00:11 crc kubenswrapper[4701]: I1121 20:00:11.370325 4701 scope.go:117] "RemoveContainer" containerID="b35425b01358249b044adc8ab2eda4643c4fc378e5abf6dd76d0a26346c8d878" Nov 21 20:00:39 crc kubenswrapper[4701]: I1121 20:00:39.884149 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-8fhck"] Nov 21 20:00:39 crc kubenswrapper[4701]: E1121 20:00:39.885527 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f3a1f12-3ef6-41f0-9290-273103bcac4f" containerName="collect-profiles" Nov 21 20:00:39 crc kubenswrapper[4701]: I1121 20:00:39.885546 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f3a1f12-3ef6-41f0-9290-273103bcac4f" containerName="collect-profiles" Nov 21 20:00:39 crc kubenswrapper[4701]: I1121 20:00:39.885847 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="9f3a1f12-3ef6-41f0-9290-273103bcac4f" containerName="collect-profiles" Nov 21 20:00:39 crc kubenswrapper[4701]: I1121 20:00:39.887959 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8fhck" Nov 21 20:00:39 crc kubenswrapper[4701]: I1121 20:00:39.901111 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8fhck"] Nov 21 20:00:40 crc kubenswrapper[4701]: I1121 20:00:40.010674 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ca32655-06d1-4410-a5ea-c529c66290b0-utilities\") pod \"redhat-marketplace-8fhck\" (UID: \"8ca32655-06d1-4410-a5ea-c529c66290b0\") " pod="openshift-marketplace/redhat-marketplace-8fhck" Nov 21 20:00:40 crc kubenswrapper[4701]: I1121 20:00:40.011379 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ca32655-06d1-4410-a5ea-c529c66290b0-catalog-content\") pod \"redhat-marketplace-8fhck\" (UID: \"8ca32655-06d1-4410-a5ea-c529c66290b0\") " pod="openshift-marketplace/redhat-marketplace-8fhck" Nov 21 20:00:40 crc kubenswrapper[4701]: I1121 20:00:40.011460 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x69xk\" (UniqueName: \"kubernetes.io/projected/8ca32655-06d1-4410-a5ea-c529c66290b0-kube-api-access-x69xk\") pod \"redhat-marketplace-8fhck\" (UID: \"8ca32655-06d1-4410-a5ea-c529c66290b0\") " pod="openshift-marketplace/redhat-marketplace-8fhck" Nov 21 20:00:40 crc kubenswrapper[4701]: I1121 20:00:40.114102 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ca32655-06d1-4410-a5ea-c529c66290b0-utilities\") pod \"redhat-marketplace-8fhck\" (UID: \"8ca32655-06d1-4410-a5ea-c529c66290b0\") " pod="openshift-marketplace/redhat-marketplace-8fhck" Nov 21 20:00:40 crc kubenswrapper[4701]: I1121 20:00:40.114431 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ca32655-06d1-4410-a5ea-c529c66290b0-catalog-content\") pod \"redhat-marketplace-8fhck\" (UID: \"8ca32655-06d1-4410-a5ea-c529c66290b0\") " pod="openshift-marketplace/redhat-marketplace-8fhck" Nov 21 20:00:40 crc kubenswrapper[4701]: I1121 20:00:40.114472 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x69xk\" (UniqueName: \"kubernetes.io/projected/8ca32655-06d1-4410-a5ea-c529c66290b0-kube-api-access-x69xk\") pod \"redhat-marketplace-8fhck\" (UID: \"8ca32655-06d1-4410-a5ea-c529c66290b0\") " pod="openshift-marketplace/redhat-marketplace-8fhck" Nov 21 20:00:40 crc kubenswrapper[4701]: I1121 20:00:40.116106 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ca32655-06d1-4410-a5ea-c529c66290b0-catalog-content\") pod \"redhat-marketplace-8fhck\" (UID: \"8ca32655-06d1-4410-a5ea-c529c66290b0\") " pod="openshift-marketplace/redhat-marketplace-8fhck" Nov 21 20:00:40 crc kubenswrapper[4701]: I1121 20:00:40.116235 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ca32655-06d1-4410-a5ea-c529c66290b0-utilities\") pod \"redhat-marketplace-8fhck\" (UID: \"8ca32655-06d1-4410-a5ea-c529c66290b0\") " pod="openshift-marketplace/redhat-marketplace-8fhck" Nov 21 20:00:40 crc kubenswrapper[4701]: I1121 20:00:40.150484 4701 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-x69xk\" (UniqueName: \"kubernetes.io/projected/8ca32655-06d1-4410-a5ea-c529c66290b0-kube-api-access-x69xk\") pod \"redhat-marketplace-8fhck\" (UID: \"8ca32655-06d1-4410-a5ea-c529c66290b0\") " pod="openshift-marketplace/redhat-marketplace-8fhck" Nov 21 20:00:40 crc kubenswrapper[4701]: I1121 20:00:40.242345 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8fhck" Nov 21 20:00:40 crc kubenswrapper[4701]: I1121 20:00:40.802167 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8fhck"] Nov 21 20:00:41 crc kubenswrapper[4701]: I1121 20:00:41.369026 4701 generic.go:334] "Generic (PLEG): container finished" podID="8ca32655-06d1-4410-a5ea-c529c66290b0" containerID="a8b56b3ed29e0c378f9f7b90338f617bf686bcf642475273ea157a10763bc024" exitCode=0 Nov 21 20:00:41 crc kubenswrapper[4701]: I1121 20:00:41.369149 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8fhck" event={"ID":"8ca32655-06d1-4410-a5ea-c529c66290b0","Type":"ContainerDied","Data":"a8b56b3ed29e0c378f9f7b90338f617bf686bcf642475273ea157a10763bc024"} Nov 21 20:00:41 crc kubenswrapper[4701]: I1121 20:00:41.369695 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8fhck" event={"ID":"8ca32655-06d1-4410-a5ea-c529c66290b0","Type":"ContainerStarted","Data":"f248423126b711705afc36c613ce4bb022e29ee400d83a6e55eb0cb5a8b92897"} Nov 21 20:00:42 crc kubenswrapper[4701]: I1121 20:00:42.382846 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8fhck" event={"ID":"8ca32655-06d1-4410-a5ea-c529c66290b0","Type":"ContainerStarted","Data":"78b2d431c6ff080e73d3b96ab086270b481a8ef824cc72eea153cdcaa6831dcb"} Nov 21 20:00:43 crc kubenswrapper[4701]: I1121 20:00:43.442016 4701 generic.go:334] "Generic (PLEG): container finished" podID="8ca32655-06d1-4410-a5ea-c529c66290b0" containerID="78b2d431c6ff080e73d3b96ab086270b481a8ef824cc72eea153cdcaa6831dcb" exitCode=0 Nov 21 20:00:43 crc kubenswrapper[4701]: I1121 20:00:43.442117 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8fhck" event={"ID":"8ca32655-06d1-4410-a5ea-c529c66290b0","Type":"ContainerDied","Data":"78b2d431c6ff080e73d3b96ab086270b481a8ef824cc72eea153cdcaa6831dcb"} Nov 21 20:00:44 crc kubenswrapper[4701]: I1121 20:00:44.462683 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8fhck" event={"ID":"8ca32655-06d1-4410-a5ea-c529c66290b0","Type":"ContainerStarted","Data":"4dfea3e925da6f7922208fa7309d543f5952c9d3477131ae34040f4dcde14608"} Nov 21 20:00:44 crc kubenswrapper[4701]: I1121 20:00:44.496420 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-8fhck" podStartSLOduration=3.000258811 podStartE2EDuration="5.496391931s" podCreationTimestamp="2025-11-21 20:00:39 +0000 UTC" firstStartedPulling="2025-11-21 20:00:41.371840379 +0000 UTC m=+3532.156980416" lastFinishedPulling="2025-11-21 20:00:43.867973499 +0000 UTC m=+3534.653113536" observedRunningTime="2025-11-21 20:00:44.488281701 +0000 UTC m=+3535.273421738" watchObservedRunningTime="2025-11-21 20:00:44.496391931 +0000 UTC m=+3535.281531968" Nov 21 20:00:50 crc kubenswrapper[4701]: I1121 20:00:50.243482 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/redhat-marketplace-8fhck" Nov 21 20:00:50 crc kubenswrapper[4701]: I1121 20:00:50.244396 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-8fhck" Nov 21 20:00:50 crc kubenswrapper[4701]: I1121 20:00:50.316920 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-8fhck" Nov 21 20:00:50 crc kubenswrapper[4701]: I1121 20:00:50.606857 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-8fhck" Nov 21 20:00:50 crc kubenswrapper[4701]: I1121 20:00:50.850949 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8fhck"] Nov 21 20:00:52 crc kubenswrapper[4701]: I1121 20:00:52.565916 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-8fhck" podUID="8ca32655-06d1-4410-a5ea-c529c66290b0" containerName="registry-server" containerID="cri-o://4dfea3e925da6f7922208fa7309d543f5952c9d3477131ae34040f4dcde14608" gracePeriod=2 Nov 21 20:00:53 crc kubenswrapper[4701]: I1121 20:00:53.075529 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8fhck" Nov 21 20:00:53 crc kubenswrapper[4701]: I1121 20:00:53.190377 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x69xk\" (UniqueName: \"kubernetes.io/projected/8ca32655-06d1-4410-a5ea-c529c66290b0-kube-api-access-x69xk\") pod \"8ca32655-06d1-4410-a5ea-c529c66290b0\" (UID: \"8ca32655-06d1-4410-a5ea-c529c66290b0\") " Nov 21 20:00:53 crc kubenswrapper[4701]: I1121 20:00:53.190475 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ca32655-06d1-4410-a5ea-c529c66290b0-catalog-content\") pod \"8ca32655-06d1-4410-a5ea-c529c66290b0\" (UID: \"8ca32655-06d1-4410-a5ea-c529c66290b0\") " Nov 21 20:00:53 crc kubenswrapper[4701]: I1121 20:00:53.190615 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ca32655-06d1-4410-a5ea-c529c66290b0-utilities\") pod \"8ca32655-06d1-4410-a5ea-c529c66290b0\" (UID: \"8ca32655-06d1-4410-a5ea-c529c66290b0\") " Nov 21 20:00:53 crc kubenswrapper[4701]: I1121 20:00:53.192016 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8ca32655-06d1-4410-a5ea-c529c66290b0-utilities" (OuterVolumeSpecName: "utilities") pod "8ca32655-06d1-4410-a5ea-c529c66290b0" (UID: "8ca32655-06d1-4410-a5ea-c529c66290b0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:00:53 crc kubenswrapper[4701]: I1121 20:00:53.193351 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ca32655-06d1-4410-a5ea-c529c66290b0-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 20:00:53 crc kubenswrapper[4701]: I1121 20:00:53.216535 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ca32655-06d1-4410-a5ea-c529c66290b0-kube-api-access-x69xk" (OuterVolumeSpecName: "kube-api-access-x69xk") pod "8ca32655-06d1-4410-a5ea-c529c66290b0" (UID: "8ca32655-06d1-4410-a5ea-c529c66290b0"). InnerVolumeSpecName "kube-api-access-x69xk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 20:00:53 crc kubenswrapper[4701]: I1121 20:00:53.218652 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8ca32655-06d1-4410-a5ea-c529c66290b0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8ca32655-06d1-4410-a5ea-c529c66290b0" (UID: "8ca32655-06d1-4410-a5ea-c529c66290b0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:00:53 crc kubenswrapper[4701]: I1121 20:00:53.296175 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x69xk\" (UniqueName: \"kubernetes.io/projected/8ca32655-06d1-4410-a5ea-c529c66290b0-kube-api-access-x69xk\") on node \"crc\" DevicePath \"\"" Nov 21 20:00:53 crc kubenswrapper[4701]: I1121 20:00:53.296236 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ca32655-06d1-4410-a5ea-c529c66290b0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 20:00:53 crc kubenswrapper[4701]: I1121 20:00:53.581730 4701 generic.go:334] "Generic (PLEG): container finished" podID="8ca32655-06d1-4410-a5ea-c529c66290b0" containerID="4dfea3e925da6f7922208fa7309d543f5952c9d3477131ae34040f4dcde14608" exitCode=0 Nov 21 20:00:53 crc kubenswrapper[4701]: I1121 20:00:53.581789 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8fhck" event={"ID":"8ca32655-06d1-4410-a5ea-c529c66290b0","Type":"ContainerDied","Data":"4dfea3e925da6f7922208fa7309d543f5952c9d3477131ae34040f4dcde14608"} Nov 21 20:00:53 crc kubenswrapper[4701]: I1121 20:00:53.581823 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8fhck" event={"ID":"8ca32655-06d1-4410-a5ea-c529c66290b0","Type":"ContainerDied","Data":"f248423126b711705afc36c613ce4bb022e29ee400d83a6e55eb0cb5a8b92897"} Nov 21 20:00:53 crc kubenswrapper[4701]: I1121 20:00:53.581848 4701 scope.go:117] "RemoveContainer" containerID="4dfea3e925da6f7922208fa7309d543f5952c9d3477131ae34040f4dcde14608" Nov 21 20:00:53 crc kubenswrapper[4701]: I1121 20:00:53.582019 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8fhck" Nov 21 20:00:53 crc kubenswrapper[4701]: I1121 20:00:53.641307 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8fhck"] Nov 21 20:00:53 crc kubenswrapper[4701]: I1121 20:00:53.645468 4701 scope.go:117] "RemoveContainer" containerID="78b2d431c6ff080e73d3b96ab086270b481a8ef824cc72eea153cdcaa6831dcb" Nov 21 20:00:53 crc kubenswrapper[4701]: I1121 20:00:53.653730 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-8fhck"] Nov 21 20:00:53 crc kubenswrapper[4701]: I1121 20:00:53.675674 4701 scope.go:117] "RemoveContainer" containerID="a8b56b3ed29e0c378f9f7b90338f617bf686bcf642475273ea157a10763bc024" Nov 21 20:00:53 crc kubenswrapper[4701]: I1121 20:00:53.720368 4701 scope.go:117] "RemoveContainer" containerID="4dfea3e925da6f7922208fa7309d543f5952c9d3477131ae34040f4dcde14608" Nov 21 20:00:53 crc kubenswrapper[4701]: E1121 20:00:53.720932 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4dfea3e925da6f7922208fa7309d543f5952c9d3477131ae34040f4dcde14608\": container with ID starting with 4dfea3e925da6f7922208fa7309d543f5952c9d3477131ae34040f4dcde14608 not found: ID does not exist" containerID="4dfea3e925da6f7922208fa7309d543f5952c9d3477131ae34040f4dcde14608" Nov 21 20:00:53 crc kubenswrapper[4701]: I1121 20:00:53.721074 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4dfea3e925da6f7922208fa7309d543f5952c9d3477131ae34040f4dcde14608"} err="failed to get container status \"4dfea3e925da6f7922208fa7309d543f5952c9d3477131ae34040f4dcde14608\": rpc error: code = NotFound desc = could not find container \"4dfea3e925da6f7922208fa7309d543f5952c9d3477131ae34040f4dcde14608\": container with ID starting with 4dfea3e925da6f7922208fa7309d543f5952c9d3477131ae34040f4dcde14608 not found: ID does not exist" Nov 21 20:00:53 crc kubenswrapper[4701]: I1121 20:00:53.721188 4701 scope.go:117] "RemoveContainer" containerID="78b2d431c6ff080e73d3b96ab086270b481a8ef824cc72eea153cdcaa6831dcb" Nov 21 20:00:53 crc kubenswrapper[4701]: E1121 20:00:53.721716 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"78b2d431c6ff080e73d3b96ab086270b481a8ef824cc72eea153cdcaa6831dcb\": container with ID starting with 78b2d431c6ff080e73d3b96ab086270b481a8ef824cc72eea153cdcaa6831dcb not found: ID does not exist" containerID="78b2d431c6ff080e73d3b96ab086270b481a8ef824cc72eea153cdcaa6831dcb" Nov 21 20:00:53 crc kubenswrapper[4701]: I1121 20:00:53.721763 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"78b2d431c6ff080e73d3b96ab086270b481a8ef824cc72eea153cdcaa6831dcb"} err="failed to get container status \"78b2d431c6ff080e73d3b96ab086270b481a8ef824cc72eea153cdcaa6831dcb\": rpc error: code = NotFound desc = could not find container \"78b2d431c6ff080e73d3b96ab086270b481a8ef824cc72eea153cdcaa6831dcb\": container with ID starting with 78b2d431c6ff080e73d3b96ab086270b481a8ef824cc72eea153cdcaa6831dcb not found: ID does not exist" Nov 21 20:00:53 crc kubenswrapper[4701]: I1121 20:00:53.721793 4701 scope.go:117] "RemoveContainer" containerID="a8b56b3ed29e0c378f9f7b90338f617bf686bcf642475273ea157a10763bc024" Nov 21 20:00:53 crc kubenswrapper[4701]: E1121 20:00:53.722279 4701 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"a8b56b3ed29e0c378f9f7b90338f617bf686bcf642475273ea157a10763bc024\": container with ID starting with a8b56b3ed29e0c378f9f7b90338f617bf686bcf642475273ea157a10763bc024 not found: ID does not exist" containerID="a8b56b3ed29e0c378f9f7b90338f617bf686bcf642475273ea157a10763bc024" Nov 21 20:00:53 crc kubenswrapper[4701]: I1121 20:00:53.722484 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a8b56b3ed29e0c378f9f7b90338f617bf686bcf642475273ea157a10763bc024"} err="failed to get container status \"a8b56b3ed29e0c378f9f7b90338f617bf686bcf642475273ea157a10763bc024\": rpc error: code = NotFound desc = could not find container \"a8b56b3ed29e0c378f9f7b90338f617bf686bcf642475273ea157a10763bc024\": container with ID starting with a8b56b3ed29e0c378f9f7b90338f617bf686bcf642475273ea157a10763bc024 not found: ID does not exist" Nov 21 20:00:53 crc kubenswrapper[4701]: I1121 20:00:53.963795 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ca32655-06d1-4410-a5ea-c529c66290b0" path="/var/lib/kubelet/pods/8ca32655-06d1-4410-a5ea-c529c66290b0/volumes" Nov 21 20:01:00 crc kubenswrapper[4701]: I1121 20:01:00.199024 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29395921-cv2wk"] Nov 21 20:01:00 crc kubenswrapper[4701]: E1121 20:01:00.200641 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ca32655-06d1-4410-a5ea-c529c66290b0" containerName="extract-content" Nov 21 20:01:00 crc kubenswrapper[4701]: I1121 20:01:00.200668 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ca32655-06d1-4410-a5ea-c529c66290b0" containerName="extract-content" Nov 21 20:01:00 crc kubenswrapper[4701]: E1121 20:01:00.200715 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ca32655-06d1-4410-a5ea-c529c66290b0" containerName="extract-utilities" Nov 21 20:01:00 crc kubenswrapper[4701]: I1121 20:01:00.200730 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ca32655-06d1-4410-a5ea-c529c66290b0" containerName="extract-utilities" Nov 21 20:01:00 crc kubenswrapper[4701]: E1121 20:01:00.200802 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ca32655-06d1-4410-a5ea-c529c66290b0" containerName="registry-server" Nov 21 20:01:00 crc kubenswrapper[4701]: I1121 20:01:00.200819 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ca32655-06d1-4410-a5ea-c529c66290b0" containerName="registry-server" Nov 21 20:01:00 crc kubenswrapper[4701]: I1121 20:01:00.201190 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ca32655-06d1-4410-a5ea-c529c66290b0" containerName="registry-server" Nov 21 20:01:00 crc kubenswrapper[4701]: I1121 20:01:00.202782 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29395921-cv2wk" Nov 21 20:01:00 crc kubenswrapper[4701]: I1121 20:01:00.216983 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29395921-cv2wk"] Nov 21 20:01:00 crc kubenswrapper[4701]: I1121 20:01:00.321037 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4039a132-b77c-4449-baa0-c79e6940472f-config-data\") pod \"keystone-cron-29395921-cv2wk\" (UID: \"4039a132-b77c-4449-baa0-c79e6940472f\") " pod="openstack/keystone-cron-29395921-cv2wk" Nov 21 20:01:00 crc kubenswrapper[4701]: I1121 20:01:00.321329 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4039a132-b77c-4449-baa0-c79e6940472f-fernet-keys\") pod \"keystone-cron-29395921-cv2wk\" (UID: \"4039a132-b77c-4449-baa0-c79e6940472f\") " pod="openstack/keystone-cron-29395921-cv2wk" Nov 21 20:01:00 crc kubenswrapper[4701]: I1121 20:01:00.321376 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-slr9r\" (UniqueName: \"kubernetes.io/projected/4039a132-b77c-4449-baa0-c79e6940472f-kube-api-access-slr9r\") pod \"keystone-cron-29395921-cv2wk\" (UID: \"4039a132-b77c-4449-baa0-c79e6940472f\") " pod="openstack/keystone-cron-29395921-cv2wk" Nov 21 20:01:00 crc kubenswrapper[4701]: I1121 20:01:00.321427 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4039a132-b77c-4449-baa0-c79e6940472f-combined-ca-bundle\") pod \"keystone-cron-29395921-cv2wk\" (UID: \"4039a132-b77c-4449-baa0-c79e6940472f\") " pod="openstack/keystone-cron-29395921-cv2wk" Nov 21 20:01:00 crc kubenswrapper[4701]: I1121 20:01:00.424019 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4039a132-b77c-4449-baa0-c79e6940472f-config-data\") pod \"keystone-cron-29395921-cv2wk\" (UID: \"4039a132-b77c-4449-baa0-c79e6940472f\") " pod="openstack/keystone-cron-29395921-cv2wk" Nov 21 20:01:00 crc kubenswrapper[4701]: I1121 20:01:00.424263 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4039a132-b77c-4449-baa0-c79e6940472f-fernet-keys\") pod \"keystone-cron-29395921-cv2wk\" (UID: \"4039a132-b77c-4449-baa0-c79e6940472f\") " pod="openstack/keystone-cron-29395921-cv2wk" Nov 21 20:01:00 crc kubenswrapper[4701]: I1121 20:01:00.424298 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-slr9r\" (UniqueName: \"kubernetes.io/projected/4039a132-b77c-4449-baa0-c79e6940472f-kube-api-access-slr9r\") pod \"keystone-cron-29395921-cv2wk\" (UID: \"4039a132-b77c-4449-baa0-c79e6940472f\") " pod="openstack/keystone-cron-29395921-cv2wk" Nov 21 20:01:00 crc kubenswrapper[4701]: I1121 20:01:00.424348 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4039a132-b77c-4449-baa0-c79e6940472f-combined-ca-bundle\") pod \"keystone-cron-29395921-cv2wk\" (UID: \"4039a132-b77c-4449-baa0-c79e6940472f\") " pod="openstack/keystone-cron-29395921-cv2wk" Nov 21 20:01:00 crc kubenswrapper[4701]: I1121 20:01:00.434725 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4039a132-b77c-4449-baa0-c79e6940472f-fernet-keys\") pod \"keystone-cron-29395921-cv2wk\" (UID: \"4039a132-b77c-4449-baa0-c79e6940472f\") " pod="openstack/keystone-cron-29395921-cv2wk" Nov 21 20:01:00 crc kubenswrapper[4701]: I1121 20:01:00.435533 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4039a132-b77c-4449-baa0-c79e6940472f-combined-ca-bundle\") pod \"keystone-cron-29395921-cv2wk\" (UID: \"4039a132-b77c-4449-baa0-c79e6940472f\") " pod="openstack/keystone-cron-29395921-cv2wk" Nov 21 20:01:00 crc kubenswrapper[4701]: I1121 20:01:00.436717 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4039a132-b77c-4449-baa0-c79e6940472f-config-data\") pod \"keystone-cron-29395921-cv2wk\" (UID: \"4039a132-b77c-4449-baa0-c79e6940472f\") " pod="openstack/keystone-cron-29395921-cv2wk" Nov 21 20:01:00 crc kubenswrapper[4701]: I1121 20:01:00.448995 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-slr9r\" (UniqueName: \"kubernetes.io/projected/4039a132-b77c-4449-baa0-c79e6940472f-kube-api-access-slr9r\") pod \"keystone-cron-29395921-cv2wk\" (UID: \"4039a132-b77c-4449-baa0-c79e6940472f\") " pod="openstack/keystone-cron-29395921-cv2wk" Nov 21 20:01:00 crc kubenswrapper[4701]: I1121 20:01:00.542632 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29395921-cv2wk" Nov 21 20:01:01 crc kubenswrapper[4701]: I1121 20:01:01.074002 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29395921-cv2wk"] Nov 21 20:01:01 crc kubenswrapper[4701]: I1121 20:01:01.687040 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29395921-cv2wk" event={"ID":"4039a132-b77c-4449-baa0-c79e6940472f","Type":"ContainerStarted","Data":"86f3a314bb2a6013d8b239827bd06329243b7648c638d8f1d56a69cb3361df59"} Nov 21 20:01:01 crc kubenswrapper[4701]: I1121 20:01:01.687466 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29395921-cv2wk" event={"ID":"4039a132-b77c-4449-baa0-c79e6940472f","Type":"ContainerStarted","Data":"ddd3cd4c43db5489a7c2cdf14f7fce9373f7f808aa3426e5939a945599124e03"} Nov 21 20:01:01 crc kubenswrapper[4701]: I1121 20:01:01.713846 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29395921-cv2wk" podStartSLOduration=1.7138085950000002 podStartE2EDuration="1.713808595s" podCreationTimestamp="2025-11-21 20:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 20:01:01.704687228 +0000 UTC m=+3552.489827265" watchObservedRunningTime="2025-11-21 20:01:01.713808595 +0000 UTC m=+3552.498948652" Nov 21 20:01:05 crc kubenswrapper[4701]: I1121 20:01:05.737920 4701 generic.go:334] "Generic (PLEG): container finished" podID="4039a132-b77c-4449-baa0-c79e6940472f" containerID="86f3a314bb2a6013d8b239827bd06329243b7648c638d8f1d56a69cb3361df59" exitCode=0 Nov 21 20:01:05 crc kubenswrapper[4701]: I1121 20:01:05.738058 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29395921-cv2wk" event={"ID":"4039a132-b77c-4449-baa0-c79e6940472f","Type":"ContainerDied","Data":"86f3a314bb2a6013d8b239827bd06329243b7648c638d8f1d56a69cb3361df59"} Nov 21 20:01:07 crc 
kubenswrapper[4701]: I1121 20:01:07.267225 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29395921-cv2wk" Nov 21 20:01:07 crc kubenswrapper[4701]: I1121 20:01:07.315371 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4039a132-b77c-4449-baa0-c79e6940472f-combined-ca-bundle\") pod \"4039a132-b77c-4449-baa0-c79e6940472f\" (UID: \"4039a132-b77c-4449-baa0-c79e6940472f\") " Nov 21 20:01:07 crc kubenswrapper[4701]: I1121 20:01:07.315508 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4039a132-b77c-4449-baa0-c79e6940472f-fernet-keys\") pod \"4039a132-b77c-4449-baa0-c79e6940472f\" (UID: \"4039a132-b77c-4449-baa0-c79e6940472f\") " Nov 21 20:01:07 crc kubenswrapper[4701]: I1121 20:01:07.315676 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4039a132-b77c-4449-baa0-c79e6940472f-config-data\") pod \"4039a132-b77c-4449-baa0-c79e6940472f\" (UID: \"4039a132-b77c-4449-baa0-c79e6940472f\") " Nov 21 20:01:07 crc kubenswrapper[4701]: I1121 20:01:07.315711 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-slr9r\" (UniqueName: \"kubernetes.io/projected/4039a132-b77c-4449-baa0-c79e6940472f-kube-api-access-slr9r\") pod \"4039a132-b77c-4449-baa0-c79e6940472f\" (UID: \"4039a132-b77c-4449-baa0-c79e6940472f\") " Nov 21 20:01:07 crc kubenswrapper[4701]: I1121 20:01:07.336566 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4039a132-b77c-4449-baa0-c79e6940472f-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "4039a132-b77c-4449-baa0-c79e6940472f" (UID: "4039a132-b77c-4449-baa0-c79e6940472f"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 20:01:07 crc kubenswrapper[4701]: I1121 20:01:07.340351 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4039a132-b77c-4449-baa0-c79e6940472f-kube-api-access-slr9r" (OuterVolumeSpecName: "kube-api-access-slr9r") pod "4039a132-b77c-4449-baa0-c79e6940472f" (UID: "4039a132-b77c-4449-baa0-c79e6940472f"). InnerVolumeSpecName "kube-api-access-slr9r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 20:01:07 crc kubenswrapper[4701]: I1121 20:01:07.357404 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4039a132-b77c-4449-baa0-c79e6940472f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4039a132-b77c-4449-baa0-c79e6940472f" (UID: "4039a132-b77c-4449-baa0-c79e6940472f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 20:01:07 crc kubenswrapper[4701]: I1121 20:01:07.388563 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4039a132-b77c-4449-baa0-c79e6940472f-config-data" (OuterVolumeSpecName: "config-data") pod "4039a132-b77c-4449-baa0-c79e6940472f" (UID: "4039a132-b77c-4449-baa0-c79e6940472f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 20:01:07 crc kubenswrapper[4701]: I1121 20:01:07.420169 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4039a132-b77c-4449-baa0-c79e6940472f-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 20:01:07 crc kubenswrapper[4701]: I1121 20:01:07.420251 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-slr9r\" (UniqueName: \"kubernetes.io/projected/4039a132-b77c-4449-baa0-c79e6940472f-kube-api-access-slr9r\") on node \"crc\" DevicePath \"\"" Nov 21 20:01:07 crc kubenswrapper[4701]: I1121 20:01:07.420273 4701 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4039a132-b77c-4449-baa0-c79e6940472f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 20:01:07 crc kubenswrapper[4701]: I1121 20:01:07.420291 4701 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4039a132-b77c-4449-baa0-c79e6940472f-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 21 20:01:07 crc kubenswrapper[4701]: I1121 20:01:07.768839 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29395921-cv2wk" event={"ID":"4039a132-b77c-4449-baa0-c79e6940472f","Type":"ContainerDied","Data":"ddd3cd4c43db5489a7c2cdf14f7fce9373f7f808aa3426e5939a945599124e03"} Nov 21 20:01:07 crc kubenswrapper[4701]: I1121 20:01:07.769291 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ddd3cd4c43db5489a7c2cdf14f7fce9373f7f808aa3426e5939a945599124e03" Nov 21 20:01:07 crc kubenswrapper[4701]: I1121 20:01:07.768953 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29395921-cv2wk" Nov 21 20:01:48 crc kubenswrapper[4701]: I1121 20:01:48.613825 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 20:01:48 crc kubenswrapper[4701]: I1121 20:01:48.614735 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 20:02:18 crc kubenswrapper[4701]: I1121 20:02:18.614117 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 20:02:18 crc kubenswrapper[4701]: I1121 20:02:18.615051 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 20:02:23 crc kubenswrapper[4701]: I1121 20:02:23.746943 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-66cbbc6b59-4jhxd" podUID="567ed826-1db0-4018-b4ea-8af42596aa3e" 
containerName="proxy-server" probeResult="failure" output="HTTP probe failed with statuscode: 502" Nov 21 20:02:42 crc kubenswrapper[4701]: I1121 20:02:42.355267 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-92bfg"] Nov 21 20:02:42 crc kubenswrapper[4701]: E1121 20:02:42.356436 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4039a132-b77c-4449-baa0-c79e6940472f" containerName="keystone-cron" Nov 21 20:02:42 crc kubenswrapper[4701]: I1121 20:02:42.356457 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="4039a132-b77c-4449-baa0-c79e6940472f" containerName="keystone-cron" Nov 21 20:02:42 crc kubenswrapper[4701]: I1121 20:02:42.356766 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="4039a132-b77c-4449-baa0-c79e6940472f" containerName="keystone-cron" Nov 21 20:02:42 crc kubenswrapper[4701]: I1121 20:02:42.359289 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-92bfg" Nov 21 20:02:42 crc kubenswrapper[4701]: I1121 20:02:42.386390 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba9ac17d-b826-486c-9e7c-c78fed1ec3b5-utilities\") pod \"community-operators-92bfg\" (UID: \"ba9ac17d-b826-486c-9e7c-c78fed1ec3b5\") " pod="openshift-marketplace/community-operators-92bfg" Nov 21 20:02:42 crc kubenswrapper[4701]: I1121 20:02:42.386611 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba9ac17d-b826-486c-9e7c-c78fed1ec3b5-catalog-content\") pod \"community-operators-92bfg\" (UID: \"ba9ac17d-b826-486c-9e7c-c78fed1ec3b5\") " pod="openshift-marketplace/community-operators-92bfg" Nov 21 20:02:42 crc kubenswrapper[4701]: I1121 20:02:42.386650 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mtctp\" (UniqueName: \"kubernetes.io/projected/ba9ac17d-b826-486c-9e7c-c78fed1ec3b5-kube-api-access-mtctp\") pod \"community-operators-92bfg\" (UID: \"ba9ac17d-b826-486c-9e7c-c78fed1ec3b5\") " pod="openshift-marketplace/community-operators-92bfg" Nov 21 20:02:42 crc kubenswrapper[4701]: I1121 20:02:42.388970 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-92bfg"] Nov 21 20:02:42 crc kubenswrapper[4701]: I1121 20:02:42.489136 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba9ac17d-b826-486c-9e7c-c78fed1ec3b5-catalog-content\") pod \"community-operators-92bfg\" (UID: \"ba9ac17d-b826-486c-9e7c-c78fed1ec3b5\") " pod="openshift-marketplace/community-operators-92bfg" Nov 21 20:02:42 crc kubenswrapper[4701]: I1121 20:02:42.489533 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mtctp\" (UniqueName: \"kubernetes.io/projected/ba9ac17d-b826-486c-9e7c-c78fed1ec3b5-kube-api-access-mtctp\") pod \"community-operators-92bfg\" (UID: \"ba9ac17d-b826-486c-9e7c-c78fed1ec3b5\") " pod="openshift-marketplace/community-operators-92bfg" Nov 21 20:02:42 crc kubenswrapper[4701]: I1121 20:02:42.489634 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba9ac17d-b826-486c-9e7c-c78fed1ec3b5-utilities\") pod 
\"community-operators-92bfg\" (UID: \"ba9ac17d-b826-486c-9e7c-c78fed1ec3b5\") " pod="openshift-marketplace/community-operators-92bfg" Nov 21 20:02:42 crc kubenswrapper[4701]: I1121 20:02:42.490268 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba9ac17d-b826-486c-9e7c-c78fed1ec3b5-catalog-content\") pod \"community-operators-92bfg\" (UID: \"ba9ac17d-b826-486c-9e7c-c78fed1ec3b5\") " pod="openshift-marketplace/community-operators-92bfg" Nov 21 20:02:42 crc kubenswrapper[4701]: I1121 20:02:42.490527 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba9ac17d-b826-486c-9e7c-c78fed1ec3b5-utilities\") pod \"community-operators-92bfg\" (UID: \"ba9ac17d-b826-486c-9e7c-c78fed1ec3b5\") " pod="openshift-marketplace/community-operators-92bfg" Nov 21 20:02:42 crc kubenswrapper[4701]: I1121 20:02:42.513440 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mtctp\" (UniqueName: \"kubernetes.io/projected/ba9ac17d-b826-486c-9e7c-c78fed1ec3b5-kube-api-access-mtctp\") pod \"community-operators-92bfg\" (UID: \"ba9ac17d-b826-486c-9e7c-c78fed1ec3b5\") " pod="openshift-marketplace/community-operators-92bfg" Nov 21 20:02:42 crc kubenswrapper[4701]: I1121 20:02:42.703429 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-92bfg" Nov 21 20:02:43 crc kubenswrapper[4701]: I1121 20:02:43.170014 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-92bfg"] Nov 21 20:02:44 crc kubenswrapper[4701]: I1121 20:02:44.031142 4701 generic.go:334] "Generic (PLEG): container finished" podID="ba9ac17d-b826-486c-9e7c-c78fed1ec3b5" containerID="a29294907b4ea3ca7eb8823a9cf8f63944861a9e1dddeda9ff6d1c6bca8df1ee" exitCode=0 Nov 21 20:02:44 crc kubenswrapper[4701]: I1121 20:02:44.031260 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-92bfg" event={"ID":"ba9ac17d-b826-486c-9e7c-c78fed1ec3b5","Type":"ContainerDied","Data":"a29294907b4ea3ca7eb8823a9cf8f63944861a9e1dddeda9ff6d1c6bca8df1ee"} Nov 21 20:02:44 crc kubenswrapper[4701]: I1121 20:02:44.031593 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-92bfg" event={"ID":"ba9ac17d-b826-486c-9e7c-c78fed1ec3b5","Type":"ContainerStarted","Data":"196bfe3f40deb9790fa9f1df235abc578e50d43f75d4a26e88fec19aa8ff3075"} Nov 21 20:02:44 crc kubenswrapper[4701]: I1121 20:02:44.033951 4701 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 20:02:46 crc kubenswrapper[4701]: I1121 20:02:46.064276 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-92bfg" event={"ID":"ba9ac17d-b826-486c-9e7c-c78fed1ec3b5","Type":"ContainerStarted","Data":"b6c44df466f69900f029c7d4a1661742c14a1f5b1e504705a247dcfd809163bd"} Nov 21 20:02:47 crc kubenswrapper[4701]: I1121 20:02:47.086315 4701 generic.go:334] "Generic (PLEG): container finished" podID="ba9ac17d-b826-486c-9e7c-c78fed1ec3b5" containerID="b6c44df466f69900f029c7d4a1661742c14a1f5b1e504705a247dcfd809163bd" exitCode=0 Nov 21 20:02:47 crc kubenswrapper[4701]: I1121 20:02:47.086376 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-92bfg" 
event={"ID":"ba9ac17d-b826-486c-9e7c-c78fed1ec3b5","Type":"ContainerDied","Data":"b6c44df466f69900f029c7d4a1661742c14a1f5b1e504705a247dcfd809163bd"} Nov 21 20:02:48 crc kubenswrapper[4701]: I1121 20:02:48.105609 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-92bfg" event={"ID":"ba9ac17d-b826-486c-9e7c-c78fed1ec3b5","Type":"ContainerStarted","Data":"62fa5b1c77fb783de4c9f3df38d34423387eafc00ef2c2d447c2fa112ad4d8c6"} Nov 21 20:02:48 crc kubenswrapper[4701]: I1121 20:02:48.150600 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-92bfg" podStartSLOduration=2.690173515 podStartE2EDuration="6.150570925s" podCreationTimestamp="2025-11-21 20:02:42 +0000 UTC" firstStartedPulling="2025-11-21 20:02:44.033661494 +0000 UTC m=+3654.818801521" lastFinishedPulling="2025-11-21 20:02:47.494058864 +0000 UTC m=+3658.279198931" observedRunningTime="2025-11-21 20:02:48.132713333 +0000 UTC m=+3658.917853400" watchObservedRunningTime="2025-11-21 20:02:48.150570925 +0000 UTC m=+3658.935710972" Nov 21 20:02:48 crc kubenswrapper[4701]: I1121 20:02:48.613433 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 20:02:48 crc kubenswrapper[4701]: I1121 20:02:48.613515 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 20:02:48 crc kubenswrapper[4701]: I1121 20:02:48.613583 4701 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" Nov 21 20:02:48 crc kubenswrapper[4701]: I1121 20:02:48.614888 4701 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7e1dcf3e5eec58d800cf03f8bc8e23257bc0c39c02d454c55e79aa0ba3adf8a7"} pod="openshift-machine-config-operator/machine-config-daemon-tbszf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 20:02:48 crc kubenswrapper[4701]: I1121 20:02:48.614986 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" containerID="cri-o://7e1dcf3e5eec58d800cf03f8bc8e23257bc0c39c02d454c55e79aa0ba3adf8a7" gracePeriod=600 Nov 21 20:02:48 crc kubenswrapper[4701]: E1121 20:02:48.769092 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:02:49 crc kubenswrapper[4701]: I1121 20:02:49.125649 4701 generic.go:334] "Generic (PLEG): container finished" podID="e70a068b-c06b-4ffe-8496-6f55c321d614" 
containerID="7e1dcf3e5eec58d800cf03f8bc8e23257bc0c39c02d454c55e79aa0ba3adf8a7" exitCode=0 Nov 21 20:02:49 crc kubenswrapper[4701]: I1121 20:02:49.127225 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerDied","Data":"7e1dcf3e5eec58d800cf03f8bc8e23257bc0c39c02d454c55e79aa0ba3adf8a7"} Nov 21 20:02:49 crc kubenswrapper[4701]: I1121 20:02:49.127265 4701 scope.go:117] "RemoveContainer" containerID="7d3324e3b46a4a77ff601e9e218cac0e8c546a519bd39a4ea54f051adbef5121" Nov 21 20:02:49 crc kubenswrapper[4701]: I1121 20:02:49.127657 4701 scope.go:117] "RemoveContainer" containerID="7e1dcf3e5eec58d800cf03f8bc8e23257bc0c39c02d454c55e79aa0ba3adf8a7" Nov 21 20:02:49 crc kubenswrapper[4701]: E1121 20:02:49.127880 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:02:52 crc kubenswrapper[4701]: I1121 20:02:52.704714 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-92bfg" Nov 21 20:02:52 crc kubenswrapper[4701]: I1121 20:02:52.705927 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-92bfg" Nov 21 20:02:52 crc kubenswrapper[4701]: I1121 20:02:52.779123 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-92bfg" Nov 21 20:02:53 crc kubenswrapper[4701]: I1121 20:02:53.267274 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-92bfg" Nov 21 20:02:53 crc kubenswrapper[4701]: I1121 20:02:53.330009 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-92bfg"] Nov 21 20:02:55 crc kubenswrapper[4701]: I1121 20:02:55.229465 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-92bfg" podUID="ba9ac17d-b826-486c-9e7c-c78fed1ec3b5" containerName="registry-server" containerID="cri-o://62fa5b1c77fb783de4c9f3df38d34423387eafc00ef2c2d447c2fa112ad4d8c6" gracePeriod=2 Nov 21 20:02:55 crc kubenswrapper[4701]: I1121 20:02:55.877082 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-92bfg" Nov 21 20:02:55 crc kubenswrapper[4701]: I1121 20:02:55.968590 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba9ac17d-b826-486c-9e7c-c78fed1ec3b5-catalog-content\") pod \"ba9ac17d-b826-486c-9e7c-c78fed1ec3b5\" (UID: \"ba9ac17d-b826-486c-9e7c-c78fed1ec3b5\") " Nov 21 20:02:55 crc kubenswrapper[4701]: I1121 20:02:55.968891 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mtctp\" (UniqueName: \"kubernetes.io/projected/ba9ac17d-b826-486c-9e7c-c78fed1ec3b5-kube-api-access-mtctp\") pod \"ba9ac17d-b826-486c-9e7c-c78fed1ec3b5\" (UID: \"ba9ac17d-b826-486c-9e7c-c78fed1ec3b5\") " Nov 21 20:02:55 crc kubenswrapper[4701]: I1121 20:02:55.968940 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba9ac17d-b826-486c-9e7c-c78fed1ec3b5-utilities\") pod \"ba9ac17d-b826-486c-9e7c-c78fed1ec3b5\" (UID: \"ba9ac17d-b826-486c-9e7c-c78fed1ec3b5\") " Nov 21 20:02:55 crc kubenswrapper[4701]: I1121 20:02:55.969760 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba9ac17d-b826-486c-9e7c-c78fed1ec3b5-utilities" (OuterVolumeSpecName: "utilities") pod "ba9ac17d-b826-486c-9e7c-c78fed1ec3b5" (UID: "ba9ac17d-b826-486c-9e7c-c78fed1ec3b5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:02:55 crc kubenswrapper[4701]: I1121 20:02:55.975681 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba9ac17d-b826-486c-9e7c-c78fed1ec3b5-kube-api-access-mtctp" (OuterVolumeSpecName: "kube-api-access-mtctp") pod "ba9ac17d-b826-486c-9e7c-c78fed1ec3b5" (UID: "ba9ac17d-b826-486c-9e7c-c78fed1ec3b5"). InnerVolumeSpecName "kube-api-access-mtctp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 20:02:56 crc kubenswrapper[4701]: I1121 20:02:56.032579 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba9ac17d-b826-486c-9e7c-c78fed1ec3b5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ba9ac17d-b826-486c-9e7c-c78fed1ec3b5" (UID: "ba9ac17d-b826-486c-9e7c-c78fed1ec3b5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:02:56 crc kubenswrapper[4701]: I1121 20:02:56.072715 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mtctp\" (UniqueName: \"kubernetes.io/projected/ba9ac17d-b826-486c-9e7c-c78fed1ec3b5-kube-api-access-mtctp\") on node \"crc\" DevicePath \"\"" Nov 21 20:02:56 crc kubenswrapper[4701]: I1121 20:02:56.072756 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba9ac17d-b826-486c-9e7c-c78fed1ec3b5-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 20:02:56 crc kubenswrapper[4701]: I1121 20:02:56.072768 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba9ac17d-b826-486c-9e7c-c78fed1ec3b5-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 20:02:56 crc kubenswrapper[4701]: I1121 20:02:56.247790 4701 generic.go:334] "Generic (PLEG): container finished" podID="ba9ac17d-b826-486c-9e7c-c78fed1ec3b5" containerID="62fa5b1c77fb783de4c9f3df38d34423387eafc00ef2c2d447c2fa112ad4d8c6" exitCode=0 Nov 21 20:02:56 crc kubenswrapper[4701]: I1121 20:02:56.247867 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-92bfg" event={"ID":"ba9ac17d-b826-486c-9e7c-c78fed1ec3b5","Type":"ContainerDied","Data":"62fa5b1c77fb783de4c9f3df38d34423387eafc00ef2c2d447c2fa112ad4d8c6"} Nov 21 20:02:56 crc kubenswrapper[4701]: I1121 20:02:56.247937 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-92bfg" event={"ID":"ba9ac17d-b826-486c-9e7c-c78fed1ec3b5","Type":"ContainerDied","Data":"196bfe3f40deb9790fa9f1df235abc578e50d43f75d4a26e88fec19aa8ff3075"} Nov 21 20:02:56 crc kubenswrapper[4701]: I1121 20:02:56.247974 4701 scope.go:117] "RemoveContainer" containerID="62fa5b1c77fb783de4c9f3df38d34423387eafc00ef2c2d447c2fa112ad4d8c6" Nov 21 20:02:56 crc kubenswrapper[4701]: I1121 20:02:56.247971 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-92bfg" Nov 21 20:02:56 crc kubenswrapper[4701]: I1121 20:02:56.288505 4701 scope.go:117] "RemoveContainer" containerID="b6c44df466f69900f029c7d4a1661742c14a1f5b1e504705a247dcfd809163bd" Nov 21 20:02:56 crc kubenswrapper[4701]: I1121 20:02:56.323421 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-92bfg"] Nov 21 20:02:56 crc kubenswrapper[4701]: I1121 20:02:56.336095 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-92bfg"] Nov 21 20:02:56 crc kubenswrapper[4701]: I1121 20:02:56.344131 4701 scope.go:117] "RemoveContainer" containerID="a29294907b4ea3ca7eb8823a9cf8f63944861a9e1dddeda9ff6d1c6bca8df1ee" Nov 21 20:02:56 crc kubenswrapper[4701]: I1121 20:02:56.399899 4701 scope.go:117] "RemoveContainer" containerID="62fa5b1c77fb783de4c9f3df38d34423387eafc00ef2c2d447c2fa112ad4d8c6" Nov 21 20:02:56 crc kubenswrapper[4701]: E1121 20:02:56.400800 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"62fa5b1c77fb783de4c9f3df38d34423387eafc00ef2c2d447c2fa112ad4d8c6\": container with ID starting with 62fa5b1c77fb783de4c9f3df38d34423387eafc00ef2c2d447c2fa112ad4d8c6 not found: ID does not exist" containerID="62fa5b1c77fb783de4c9f3df38d34423387eafc00ef2c2d447c2fa112ad4d8c6" Nov 21 20:02:56 crc kubenswrapper[4701]: I1121 20:02:56.400882 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"62fa5b1c77fb783de4c9f3df38d34423387eafc00ef2c2d447c2fa112ad4d8c6"} err="failed to get container status \"62fa5b1c77fb783de4c9f3df38d34423387eafc00ef2c2d447c2fa112ad4d8c6\": rpc error: code = NotFound desc = could not find container \"62fa5b1c77fb783de4c9f3df38d34423387eafc00ef2c2d447c2fa112ad4d8c6\": container with ID starting with 62fa5b1c77fb783de4c9f3df38d34423387eafc00ef2c2d447c2fa112ad4d8c6 not found: ID does not exist" Nov 21 20:02:56 crc kubenswrapper[4701]: I1121 20:02:56.400931 4701 scope.go:117] "RemoveContainer" containerID="b6c44df466f69900f029c7d4a1661742c14a1f5b1e504705a247dcfd809163bd" Nov 21 20:02:56 crc kubenswrapper[4701]: E1121 20:02:56.401521 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b6c44df466f69900f029c7d4a1661742c14a1f5b1e504705a247dcfd809163bd\": container with ID starting with b6c44df466f69900f029c7d4a1661742c14a1f5b1e504705a247dcfd809163bd not found: ID does not exist" containerID="b6c44df466f69900f029c7d4a1661742c14a1f5b1e504705a247dcfd809163bd" Nov 21 20:02:56 crc kubenswrapper[4701]: I1121 20:02:56.401603 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b6c44df466f69900f029c7d4a1661742c14a1f5b1e504705a247dcfd809163bd"} err="failed to get container status \"b6c44df466f69900f029c7d4a1661742c14a1f5b1e504705a247dcfd809163bd\": rpc error: code = NotFound desc = could not find container \"b6c44df466f69900f029c7d4a1661742c14a1f5b1e504705a247dcfd809163bd\": container with ID starting with b6c44df466f69900f029c7d4a1661742c14a1f5b1e504705a247dcfd809163bd not found: ID does not exist" Nov 21 20:02:56 crc kubenswrapper[4701]: I1121 20:02:56.401659 4701 scope.go:117] "RemoveContainer" containerID="a29294907b4ea3ca7eb8823a9cf8f63944861a9e1dddeda9ff6d1c6bca8df1ee" Nov 21 20:02:56 crc kubenswrapper[4701]: E1121 20:02:56.402135 4701 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"a29294907b4ea3ca7eb8823a9cf8f63944861a9e1dddeda9ff6d1c6bca8df1ee\": container with ID starting with a29294907b4ea3ca7eb8823a9cf8f63944861a9e1dddeda9ff6d1c6bca8df1ee not found: ID does not exist" containerID="a29294907b4ea3ca7eb8823a9cf8f63944861a9e1dddeda9ff6d1c6bca8df1ee" Nov 21 20:02:56 crc kubenswrapper[4701]: I1121 20:02:56.402172 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a29294907b4ea3ca7eb8823a9cf8f63944861a9e1dddeda9ff6d1c6bca8df1ee"} err="failed to get container status \"a29294907b4ea3ca7eb8823a9cf8f63944861a9e1dddeda9ff6d1c6bca8df1ee\": rpc error: code = NotFound desc = could not find container \"a29294907b4ea3ca7eb8823a9cf8f63944861a9e1dddeda9ff6d1c6bca8df1ee\": container with ID starting with a29294907b4ea3ca7eb8823a9cf8f63944861a9e1dddeda9ff6d1c6bca8df1ee not found: ID does not exist" Nov 21 20:02:57 crc kubenswrapper[4701]: I1121 20:02:57.974116 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba9ac17d-b826-486c-9e7c-c78fed1ec3b5" path="/var/lib/kubelet/pods/ba9ac17d-b826-486c-9e7c-c78fed1ec3b5/volumes" Nov 21 20:02:59 crc kubenswrapper[4701]: I1121 20:02:59.977529 4701 scope.go:117] "RemoveContainer" containerID="7e1dcf3e5eec58d800cf03f8bc8e23257bc0c39c02d454c55e79aa0ba3adf8a7" Nov 21 20:02:59 crc kubenswrapper[4701]: E1121 20:02:59.978153 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:03:14 crc kubenswrapper[4701]: I1121 20:03:14.951732 4701 scope.go:117] "RemoveContainer" containerID="7e1dcf3e5eec58d800cf03f8bc8e23257bc0c39c02d454c55e79aa0ba3adf8a7" Nov 21 20:03:14 crc kubenswrapper[4701]: E1121 20:03:14.953017 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:03:27 crc kubenswrapper[4701]: I1121 20:03:27.951276 4701 scope.go:117] "RemoveContainer" containerID="7e1dcf3e5eec58d800cf03f8bc8e23257bc0c39c02d454c55e79aa0ba3adf8a7" Nov 21 20:03:27 crc kubenswrapper[4701]: E1121 20:03:27.952342 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:03:40 crc kubenswrapper[4701]: I1121 20:03:40.952231 4701 scope.go:117] "RemoveContainer" containerID="7e1dcf3e5eec58d800cf03f8bc8e23257bc0c39c02d454c55e79aa0ba3adf8a7" Nov 21 20:03:40 crc kubenswrapper[4701]: E1121 20:03:40.953146 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:03:52 crc kubenswrapper[4701]: I1121 20:03:52.953930 4701 scope.go:117] "RemoveContainer" containerID="7e1dcf3e5eec58d800cf03f8bc8e23257bc0c39c02d454c55e79aa0ba3adf8a7" Nov 21 20:03:52 crc kubenswrapper[4701]: E1121 20:03:52.955137 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:04:03 crc kubenswrapper[4701]: I1121 20:04:03.955534 4701 scope.go:117] "RemoveContainer" containerID="7e1dcf3e5eec58d800cf03f8bc8e23257bc0c39c02d454c55e79aa0ba3adf8a7" Nov 21 20:04:03 crc kubenswrapper[4701]: E1121 20:04:03.956572 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:04:14 crc kubenswrapper[4701]: I1121 20:04:14.952114 4701 scope.go:117] "RemoveContainer" containerID="7e1dcf3e5eec58d800cf03f8bc8e23257bc0c39c02d454c55e79aa0ba3adf8a7" Nov 21 20:04:14 crc kubenswrapper[4701]: E1121 20:04:14.955687 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:04:24 crc kubenswrapper[4701]: I1121 20:04:24.189916 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/certified-operators-v9dtr" podUID="08bf0aaf-b621-48f2-b2b1-c6939a9a3440" containerName="registry-server" probeResult="failure" output=< Nov 21 20:04:24 crc kubenswrapper[4701]: timeout: failed to connect service ":50051" within 1s Nov 21 20:04:24 crc kubenswrapper[4701]: > Nov 21 20:04:24 crc kubenswrapper[4701]: I1121 20:04:24.191949 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/certified-operators-v9dtr" podUID="08bf0aaf-b621-48f2-b2b1-c6939a9a3440" containerName="registry-server" probeResult="failure" output=< Nov 21 20:04:24 crc kubenswrapper[4701]: timeout: failed to connect service ":50051" within 1s Nov 21 20:04:24 crc kubenswrapper[4701]: > Nov 21 20:04:25 crc kubenswrapper[4701]: I1121 20:04:25.951264 4701 scope.go:117] "RemoveContainer" containerID="7e1dcf3e5eec58d800cf03f8bc8e23257bc0c39c02d454c55e79aa0ba3adf8a7" Nov 21 20:04:25 crc kubenswrapper[4701]: E1121 20:04:25.953505 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" 
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:04:39 crc kubenswrapper[4701]: I1121 20:04:39.965991 4701 scope.go:117] "RemoveContainer" containerID="7e1dcf3e5eec58d800cf03f8bc8e23257bc0c39c02d454c55e79aa0ba3adf8a7" Nov 21 20:04:39 crc kubenswrapper[4701]: E1121 20:04:39.967464 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:04:51 crc kubenswrapper[4701]: I1121 20:04:51.951192 4701 scope.go:117] "RemoveContainer" containerID="7e1dcf3e5eec58d800cf03f8bc8e23257bc0c39c02d454c55e79aa0ba3adf8a7" Nov 21 20:04:51 crc kubenswrapper[4701]: E1121 20:04:51.952239 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:05:03 crc kubenswrapper[4701]: I1121 20:05:03.962456 4701 scope.go:117] "RemoveContainer" containerID="7e1dcf3e5eec58d800cf03f8bc8e23257bc0c39c02d454c55e79aa0ba3adf8a7" Nov 21 20:05:03 crc kubenswrapper[4701]: E1121 20:05:03.963645 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:05:17 crc kubenswrapper[4701]: I1121 20:05:17.953842 4701 scope.go:117] "RemoveContainer" containerID="7e1dcf3e5eec58d800cf03f8bc8e23257bc0c39c02d454c55e79aa0ba3adf8a7" Nov 21 20:05:17 crc kubenswrapper[4701]: E1121 20:05:17.955034 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:05:28 crc kubenswrapper[4701]: I1121 20:05:28.951713 4701 scope.go:117] "RemoveContainer" containerID="7e1dcf3e5eec58d800cf03f8bc8e23257bc0c39c02d454c55e79aa0ba3adf8a7" Nov 21 20:05:28 crc kubenswrapper[4701]: E1121 20:05:28.953004 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:05:43 crc kubenswrapper[4701]: I1121 20:05:43.952891 4701 scope.go:117] "RemoveContainer" containerID="7e1dcf3e5eec58d800cf03f8bc8e23257bc0c39c02d454c55e79aa0ba3adf8a7" Nov 21 20:05:43 crc kubenswrapper[4701]: E1121 20:05:43.954243 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:05:56 crc kubenswrapper[4701]: I1121 20:05:56.951956 4701 scope.go:117] "RemoveContainer" containerID="7e1dcf3e5eec58d800cf03f8bc8e23257bc0c39c02d454c55e79aa0ba3adf8a7" Nov 21 20:05:56 crc kubenswrapper[4701]: E1121 20:05:56.953129 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:06:07 crc kubenswrapper[4701]: I1121 20:06:07.952742 4701 scope.go:117] "RemoveContainer" containerID="7e1dcf3e5eec58d800cf03f8bc8e23257bc0c39c02d454c55e79aa0ba3adf8a7" Nov 21 20:06:07 crc kubenswrapper[4701]: E1121 20:06:07.954447 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:06:20 crc kubenswrapper[4701]: I1121 20:06:20.951275 4701 scope.go:117] "RemoveContainer" containerID="7e1dcf3e5eec58d800cf03f8bc8e23257bc0c39c02d454c55e79aa0ba3adf8a7" Nov 21 20:06:20 crc kubenswrapper[4701]: E1121 20:06:20.953491 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:06:34 crc kubenswrapper[4701]: I1121 20:06:34.952330 4701 scope.go:117] "RemoveContainer" containerID="7e1dcf3e5eec58d800cf03f8bc8e23257bc0c39c02d454c55e79aa0ba3adf8a7" Nov 21 20:06:34 crc kubenswrapper[4701]: E1121 20:06:34.953576 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" 
podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:06:45 crc kubenswrapper[4701]: I1121 20:06:45.952774 4701 scope.go:117] "RemoveContainer" containerID="7e1dcf3e5eec58d800cf03f8bc8e23257bc0c39c02d454c55e79aa0ba3adf8a7" Nov 21 20:06:45 crc kubenswrapper[4701]: E1121 20:06:45.954501 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:06:58 crc kubenswrapper[4701]: I1121 20:06:58.952361 4701 scope.go:117] "RemoveContainer" containerID="7e1dcf3e5eec58d800cf03f8bc8e23257bc0c39c02d454c55e79aa0ba3adf8a7" Nov 21 20:06:58 crc kubenswrapper[4701]: E1121 20:06:58.953835 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:07:12 crc kubenswrapper[4701]: I1121 20:07:12.951975 4701 scope.go:117] "RemoveContainer" containerID="7e1dcf3e5eec58d800cf03f8bc8e23257bc0c39c02d454c55e79aa0ba3adf8a7" Nov 21 20:07:12 crc kubenswrapper[4701]: E1121 20:07:12.953257 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:07:23 crc kubenswrapper[4701]: I1121 20:07:23.955398 4701 scope.go:117] "RemoveContainer" containerID="7e1dcf3e5eec58d800cf03f8bc8e23257bc0c39c02d454c55e79aa0ba3adf8a7" Nov 21 20:07:23 crc kubenswrapper[4701]: E1121 20:07:23.956652 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:07:37 crc kubenswrapper[4701]: I1121 20:07:37.952104 4701 scope.go:117] "RemoveContainer" containerID="7e1dcf3e5eec58d800cf03f8bc8e23257bc0c39c02d454c55e79aa0ba3adf8a7" Nov 21 20:07:37 crc kubenswrapper[4701]: E1121 20:07:37.953665 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:07:52 crc kubenswrapper[4701]: I1121 20:07:52.951905 4701 scope.go:117] "RemoveContainer" 
containerID="7e1dcf3e5eec58d800cf03f8bc8e23257bc0c39c02d454c55e79aa0ba3adf8a7" Nov 21 20:07:53 crc kubenswrapper[4701]: I1121 20:07:53.845665 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerStarted","Data":"6706495ab43453a2a6197d3d5061ad4405e2c07064ba090fd9606d34dbc80441"} Nov 21 20:09:14 crc kubenswrapper[4701]: I1121 20:09:14.761886 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-shhd6"] Nov 21 20:09:14 crc kubenswrapper[4701]: E1121 20:09:14.763499 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba9ac17d-b826-486c-9e7c-c78fed1ec3b5" containerName="extract-content" Nov 21 20:09:14 crc kubenswrapper[4701]: I1121 20:09:14.763527 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba9ac17d-b826-486c-9e7c-c78fed1ec3b5" containerName="extract-content" Nov 21 20:09:14 crc kubenswrapper[4701]: E1121 20:09:14.763597 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba9ac17d-b826-486c-9e7c-c78fed1ec3b5" containerName="registry-server" Nov 21 20:09:14 crc kubenswrapper[4701]: I1121 20:09:14.763612 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba9ac17d-b826-486c-9e7c-c78fed1ec3b5" containerName="registry-server" Nov 21 20:09:14 crc kubenswrapper[4701]: E1121 20:09:14.763649 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba9ac17d-b826-486c-9e7c-c78fed1ec3b5" containerName="extract-utilities" Nov 21 20:09:14 crc kubenswrapper[4701]: I1121 20:09:14.763663 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba9ac17d-b826-486c-9e7c-c78fed1ec3b5" containerName="extract-utilities" Nov 21 20:09:14 crc kubenswrapper[4701]: I1121 20:09:14.764049 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba9ac17d-b826-486c-9e7c-c78fed1ec3b5" containerName="registry-server" Nov 21 20:09:14 crc kubenswrapper[4701]: I1121 20:09:14.813433 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-shhd6" Nov 21 20:09:14 crc kubenswrapper[4701]: I1121 20:09:14.849834 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-shhd6"] Nov 21 20:09:14 crc kubenswrapper[4701]: I1121 20:09:14.954121 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/133f5079-c434-4c5b-9acb-739313be209a-catalog-content\") pod \"redhat-operators-shhd6\" (UID: \"133f5079-c434-4c5b-9acb-739313be209a\") " pod="openshift-marketplace/redhat-operators-shhd6" Nov 21 20:09:14 crc kubenswrapper[4701]: I1121 20:09:14.954266 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sc57s\" (UniqueName: \"kubernetes.io/projected/133f5079-c434-4c5b-9acb-739313be209a-kube-api-access-sc57s\") pod \"redhat-operators-shhd6\" (UID: \"133f5079-c434-4c5b-9acb-739313be209a\") " pod="openshift-marketplace/redhat-operators-shhd6" Nov 21 20:09:14 crc kubenswrapper[4701]: I1121 20:09:14.954308 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/133f5079-c434-4c5b-9acb-739313be209a-utilities\") pod \"redhat-operators-shhd6\" (UID: \"133f5079-c434-4c5b-9acb-739313be209a\") " pod="openshift-marketplace/redhat-operators-shhd6" Nov 21 20:09:15 crc kubenswrapper[4701]: I1121 20:09:15.056486 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/133f5079-c434-4c5b-9acb-739313be209a-catalog-content\") pod \"redhat-operators-shhd6\" (UID: \"133f5079-c434-4c5b-9acb-739313be209a\") " pod="openshift-marketplace/redhat-operators-shhd6" Nov 21 20:09:15 crc kubenswrapper[4701]: I1121 20:09:15.057103 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sc57s\" (UniqueName: \"kubernetes.io/projected/133f5079-c434-4c5b-9acb-739313be209a-kube-api-access-sc57s\") pod \"redhat-operators-shhd6\" (UID: \"133f5079-c434-4c5b-9acb-739313be209a\") " pod="openshift-marketplace/redhat-operators-shhd6" Nov 21 20:09:15 crc kubenswrapper[4701]: I1121 20:09:15.057155 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/133f5079-c434-4c5b-9acb-739313be209a-utilities\") pod \"redhat-operators-shhd6\" (UID: \"133f5079-c434-4c5b-9acb-739313be209a\") " pod="openshift-marketplace/redhat-operators-shhd6" Nov 21 20:09:15 crc kubenswrapper[4701]: I1121 20:09:15.058787 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/133f5079-c434-4c5b-9acb-739313be209a-catalog-content\") pod \"redhat-operators-shhd6\" (UID: \"133f5079-c434-4c5b-9acb-739313be209a\") " pod="openshift-marketplace/redhat-operators-shhd6" Nov 21 20:09:15 crc kubenswrapper[4701]: I1121 20:09:15.059173 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/133f5079-c434-4c5b-9acb-739313be209a-utilities\") pod \"redhat-operators-shhd6\" (UID: \"133f5079-c434-4c5b-9acb-739313be209a\") " pod="openshift-marketplace/redhat-operators-shhd6" Nov 21 20:09:15 crc kubenswrapper[4701]: I1121 20:09:15.078421 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-sc57s\" (UniqueName: \"kubernetes.io/projected/133f5079-c434-4c5b-9acb-739313be209a-kube-api-access-sc57s\") pod \"redhat-operators-shhd6\" (UID: \"133f5079-c434-4c5b-9acb-739313be209a\") " pod="openshift-marketplace/redhat-operators-shhd6" Nov 21 20:09:15 crc kubenswrapper[4701]: I1121 20:09:15.178265 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-shhd6" Nov 21 20:09:15 crc kubenswrapper[4701]: I1121 20:09:15.718479 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-shhd6"] Nov 21 20:09:15 crc kubenswrapper[4701]: I1121 20:09:15.936865 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-shhd6" event={"ID":"133f5079-c434-4c5b-9acb-739313be209a","Type":"ContainerStarted","Data":"63a853788f55ab655ff7a9083496ddbcc3b9f9f0c0a778720840f65733304049"} Nov 21 20:09:16 crc kubenswrapper[4701]: I1121 20:09:16.953271 4701 generic.go:334] "Generic (PLEG): container finished" podID="133f5079-c434-4c5b-9acb-739313be209a" containerID="8bd349a6f343db0c7e1dcab72e6e41505662d7532510428357a835312913907e" exitCode=0 Nov 21 20:09:16 crc kubenswrapper[4701]: I1121 20:09:16.953379 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-shhd6" event={"ID":"133f5079-c434-4c5b-9acb-739313be209a","Type":"ContainerDied","Data":"8bd349a6f343db0c7e1dcab72e6e41505662d7532510428357a835312913907e"} Nov 21 20:09:16 crc kubenswrapper[4701]: I1121 20:09:16.956837 4701 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 20:09:17 crc kubenswrapper[4701]: I1121 20:09:17.968087 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-shhd6" event={"ID":"133f5079-c434-4c5b-9acb-739313be209a","Type":"ContainerStarted","Data":"98cc148dff9ab38a758b8f66e5627756ac2b146750b0ead4abe0b71bc3f7f50f"} Nov 21 20:09:22 crc kubenswrapper[4701]: I1121 20:09:22.021871 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-shhd6" event={"ID":"133f5079-c434-4c5b-9acb-739313be209a","Type":"ContainerDied","Data":"98cc148dff9ab38a758b8f66e5627756ac2b146750b0ead4abe0b71bc3f7f50f"} Nov 21 20:09:22 crc kubenswrapper[4701]: I1121 20:09:22.021815 4701 generic.go:334] "Generic (PLEG): container finished" podID="133f5079-c434-4c5b-9acb-739313be209a" containerID="98cc148dff9ab38a758b8f66e5627756ac2b146750b0ead4abe0b71bc3f7f50f" exitCode=0 Nov 21 20:09:23 crc kubenswrapper[4701]: I1121 20:09:23.043062 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-shhd6" event={"ID":"133f5079-c434-4c5b-9acb-739313be209a","Type":"ContainerStarted","Data":"f964d903e62380801c0076643cfe974385e4421aea3acd1292025b28349b8c0e"} Nov 21 20:09:23 crc kubenswrapper[4701]: I1121 20:09:23.088146 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-shhd6" podStartSLOduration=3.556955522 podStartE2EDuration="9.088116944s" podCreationTimestamp="2025-11-21 20:09:14 +0000 UTC" firstStartedPulling="2025-11-21 20:09:16.956545448 +0000 UTC m=+4047.741685485" lastFinishedPulling="2025-11-21 20:09:22.48770687 +0000 UTC m=+4053.272846907" observedRunningTime="2025-11-21 20:09:23.075803831 +0000 UTC m=+4053.860943898" watchObservedRunningTime="2025-11-21 20:09:23.088116944 +0000 UTC m=+4053.873257011" Nov 21 20:09:25 crc 
kubenswrapper[4701]: I1121 20:09:25.179416 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-shhd6" Nov 21 20:09:25 crc kubenswrapper[4701]: I1121 20:09:25.180290 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-shhd6" Nov 21 20:09:26 crc kubenswrapper[4701]: I1121 20:09:26.248547 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-shhd6" podUID="133f5079-c434-4c5b-9acb-739313be209a" containerName="registry-server" probeResult="failure" output=< Nov 21 20:09:26 crc kubenswrapper[4701]: timeout: failed to connect service ":50051" within 1s Nov 21 20:09:26 crc kubenswrapper[4701]: > Nov 21 20:09:36 crc kubenswrapper[4701]: I1121 20:09:36.227776 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-shhd6" podUID="133f5079-c434-4c5b-9acb-739313be209a" containerName="registry-server" probeResult="failure" output=< Nov 21 20:09:36 crc kubenswrapper[4701]: timeout: failed to connect service ":50051" within 1s Nov 21 20:09:36 crc kubenswrapper[4701]: > Nov 21 20:09:45 crc kubenswrapper[4701]: I1121 20:09:45.244362 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-shhd6" Nov 21 20:09:45 crc kubenswrapper[4701]: I1121 20:09:45.321411 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-shhd6" Nov 21 20:09:45 crc kubenswrapper[4701]: I1121 20:09:45.971728 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-shhd6"] Nov 21 20:09:46 crc kubenswrapper[4701]: I1121 20:09:46.376784 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-shhd6" podUID="133f5079-c434-4c5b-9acb-739313be209a" containerName="registry-server" containerID="cri-o://f964d903e62380801c0076643cfe974385e4421aea3acd1292025b28349b8c0e" gracePeriod=2 Nov 21 20:09:46 crc kubenswrapper[4701]: I1121 20:09:46.863056 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-shhd6" Nov 21 20:09:46 crc kubenswrapper[4701]: I1121 20:09:46.987488 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/133f5079-c434-4c5b-9acb-739313be209a-catalog-content\") pod \"133f5079-c434-4c5b-9acb-739313be209a\" (UID: \"133f5079-c434-4c5b-9acb-739313be209a\") " Nov 21 20:09:46 crc kubenswrapper[4701]: I1121 20:09:46.987903 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sc57s\" (UniqueName: \"kubernetes.io/projected/133f5079-c434-4c5b-9acb-739313be209a-kube-api-access-sc57s\") pod \"133f5079-c434-4c5b-9acb-739313be209a\" (UID: \"133f5079-c434-4c5b-9acb-739313be209a\") " Nov 21 20:09:46 crc kubenswrapper[4701]: I1121 20:09:46.987973 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/133f5079-c434-4c5b-9acb-739313be209a-utilities\") pod \"133f5079-c434-4c5b-9acb-739313be209a\" (UID: \"133f5079-c434-4c5b-9acb-739313be209a\") " Nov 21 20:09:46 crc kubenswrapper[4701]: I1121 20:09:46.989224 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/133f5079-c434-4c5b-9acb-739313be209a-utilities" (OuterVolumeSpecName: "utilities") pod "133f5079-c434-4c5b-9acb-739313be209a" (UID: "133f5079-c434-4c5b-9acb-739313be209a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:09:46 crc kubenswrapper[4701]: I1121 20:09:46.994610 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/133f5079-c434-4c5b-9acb-739313be209a-kube-api-access-sc57s" (OuterVolumeSpecName: "kube-api-access-sc57s") pod "133f5079-c434-4c5b-9acb-739313be209a" (UID: "133f5079-c434-4c5b-9acb-739313be209a"). InnerVolumeSpecName "kube-api-access-sc57s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 20:09:47 crc kubenswrapper[4701]: I1121 20:09:47.090818 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/133f5079-c434-4c5b-9acb-739313be209a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "133f5079-c434-4c5b-9acb-739313be209a" (UID: "133f5079-c434-4c5b-9acb-739313be209a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:09:47 crc kubenswrapper[4701]: I1121 20:09:47.091617 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/133f5079-c434-4c5b-9acb-739313be209a-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 20:09:47 crc kubenswrapper[4701]: I1121 20:09:47.091787 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/133f5079-c434-4c5b-9acb-739313be209a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 20:09:47 crc kubenswrapper[4701]: I1121 20:09:47.091811 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sc57s\" (UniqueName: \"kubernetes.io/projected/133f5079-c434-4c5b-9acb-739313be209a-kube-api-access-sc57s\") on node \"crc\" DevicePath \"\"" Nov 21 20:09:47 crc kubenswrapper[4701]: I1121 20:09:47.393289 4701 generic.go:334] "Generic (PLEG): container finished" podID="133f5079-c434-4c5b-9acb-739313be209a" containerID="f964d903e62380801c0076643cfe974385e4421aea3acd1292025b28349b8c0e" exitCode=0 Nov 21 20:09:47 crc kubenswrapper[4701]: I1121 20:09:47.393341 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-shhd6" event={"ID":"133f5079-c434-4c5b-9acb-739313be209a","Type":"ContainerDied","Data":"f964d903e62380801c0076643cfe974385e4421aea3acd1292025b28349b8c0e"} Nov 21 20:09:47 crc kubenswrapper[4701]: I1121 20:09:47.393374 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-shhd6" event={"ID":"133f5079-c434-4c5b-9acb-739313be209a","Type":"ContainerDied","Data":"63a853788f55ab655ff7a9083496ddbcc3b9f9f0c0a778720840f65733304049"} Nov 21 20:09:47 crc kubenswrapper[4701]: I1121 20:09:47.393393 4701 scope.go:117] "RemoveContainer" containerID="f964d903e62380801c0076643cfe974385e4421aea3acd1292025b28349b8c0e" Nov 21 20:09:47 crc kubenswrapper[4701]: I1121 20:09:47.393393 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-shhd6" Nov 21 20:09:47 crc kubenswrapper[4701]: I1121 20:09:47.425522 4701 scope.go:117] "RemoveContainer" containerID="98cc148dff9ab38a758b8f66e5627756ac2b146750b0ead4abe0b71bc3f7f50f" Nov 21 20:09:47 crc kubenswrapper[4701]: I1121 20:09:47.427982 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-shhd6"] Nov 21 20:09:47 crc kubenswrapper[4701]: I1121 20:09:47.436136 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-shhd6"] Nov 21 20:09:47 crc kubenswrapper[4701]: I1121 20:09:47.476540 4701 scope.go:117] "RemoveContainer" containerID="8bd349a6f343db0c7e1dcab72e6e41505662d7532510428357a835312913907e" Nov 21 20:09:47 crc kubenswrapper[4701]: I1121 20:09:47.518345 4701 scope.go:117] "RemoveContainer" containerID="f964d903e62380801c0076643cfe974385e4421aea3acd1292025b28349b8c0e" Nov 21 20:09:47 crc kubenswrapper[4701]: E1121 20:09:47.519390 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f964d903e62380801c0076643cfe974385e4421aea3acd1292025b28349b8c0e\": container with ID starting with f964d903e62380801c0076643cfe974385e4421aea3acd1292025b28349b8c0e not found: ID does not exist" containerID="f964d903e62380801c0076643cfe974385e4421aea3acd1292025b28349b8c0e" Nov 21 20:09:47 crc kubenswrapper[4701]: I1121 20:09:47.519471 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f964d903e62380801c0076643cfe974385e4421aea3acd1292025b28349b8c0e"} err="failed to get container status \"f964d903e62380801c0076643cfe974385e4421aea3acd1292025b28349b8c0e\": rpc error: code = NotFound desc = could not find container \"f964d903e62380801c0076643cfe974385e4421aea3acd1292025b28349b8c0e\": container with ID starting with f964d903e62380801c0076643cfe974385e4421aea3acd1292025b28349b8c0e not found: ID does not exist" Nov 21 20:09:47 crc kubenswrapper[4701]: I1121 20:09:47.519514 4701 scope.go:117] "RemoveContainer" containerID="98cc148dff9ab38a758b8f66e5627756ac2b146750b0ead4abe0b71bc3f7f50f" Nov 21 20:09:47 crc kubenswrapper[4701]: E1121 20:09:47.520082 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"98cc148dff9ab38a758b8f66e5627756ac2b146750b0ead4abe0b71bc3f7f50f\": container with ID starting with 98cc148dff9ab38a758b8f66e5627756ac2b146750b0ead4abe0b71bc3f7f50f not found: ID does not exist" containerID="98cc148dff9ab38a758b8f66e5627756ac2b146750b0ead4abe0b71bc3f7f50f" Nov 21 20:09:47 crc kubenswrapper[4701]: I1121 20:09:47.520154 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98cc148dff9ab38a758b8f66e5627756ac2b146750b0ead4abe0b71bc3f7f50f"} err="failed to get container status \"98cc148dff9ab38a758b8f66e5627756ac2b146750b0ead4abe0b71bc3f7f50f\": rpc error: code = NotFound desc = could not find container \"98cc148dff9ab38a758b8f66e5627756ac2b146750b0ead4abe0b71bc3f7f50f\": container with ID starting with 98cc148dff9ab38a758b8f66e5627756ac2b146750b0ead4abe0b71bc3f7f50f not found: ID does not exist" Nov 21 20:09:47 crc kubenswrapper[4701]: I1121 20:09:47.520235 4701 scope.go:117] "RemoveContainer" containerID="8bd349a6f343db0c7e1dcab72e6e41505662d7532510428357a835312913907e" Nov 21 20:09:47 crc kubenswrapper[4701]: E1121 20:09:47.520726 4701 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"8bd349a6f343db0c7e1dcab72e6e41505662d7532510428357a835312913907e\": container with ID starting with 8bd349a6f343db0c7e1dcab72e6e41505662d7532510428357a835312913907e not found: ID does not exist" containerID="8bd349a6f343db0c7e1dcab72e6e41505662d7532510428357a835312913907e" Nov 21 20:09:47 crc kubenswrapper[4701]: I1121 20:09:47.520785 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8bd349a6f343db0c7e1dcab72e6e41505662d7532510428357a835312913907e"} err="failed to get container status \"8bd349a6f343db0c7e1dcab72e6e41505662d7532510428357a835312913907e\": rpc error: code = NotFound desc = could not find container \"8bd349a6f343db0c7e1dcab72e6e41505662d7532510428357a835312913907e\": container with ID starting with 8bd349a6f343db0c7e1dcab72e6e41505662d7532510428357a835312913907e not found: ID does not exist" Nov 21 20:09:47 crc kubenswrapper[4701]: I1121 20:09:47.971786 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="133f5079-c434-4c5b-9acb-739313be209a" path="/var/lib/kubelet/pods/133f5079-c434-4c5b-9acb-739313be209a/volumes" Nov 21 20:10:18 crc kubenswrapper[4701]: I1121 20:10:18.613269 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 20:10:18 crc kubenswrapper[4701]: I1121 20:10:18.614125 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 20:10:48 crc kubenswrapper[4701]: I1121 20:10:48.614176 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 20:10:48 crc kubenswrapper[4701]: I1121 20:10:48.615018 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 20:11:18 crc kubenswrapper[4701]: I1121 20:11:18.613860 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 20:11:18 crc kubenswrapper[4701]: I1121 20:11:18.615034 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 20:11:18 crc kubenswrapper[4701]: I1121 20:11:18.615127 4701 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" 
status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" Nov 21 20:11:18 crc kubenswrapper[4701]: I1121 20:11:18.616637 4701 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6706495ab43453a2a6197d3d5061ad4405e2c07064ba090fd9606d34dbc80441"} pod="openshift-machine-config-operator/machine-config-daemon-tbszf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 20:11:18 crc kubenswrapper[4701]: I1121 20:11:18.616942 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" containerID="cri-o://6706495ab43453a2a6197d3d5061ad4405e2c07064ba090fd9606d34dbc80441" gracePeriod=600 Nov 21 20:11:19 crc kubenswrapper[4701]: I1121 20:11:19.632150 4701 generic.go:334] "Generic (PLEG): container finished" podID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerID="6706495ab43453a2a6197d3d5061ad4405e2c07064ba090fd9606d34dbc80441" exitCode=0 Nov 21 20:11:19 crc kubenswrapper[4701]: I1121 20:11:19.632266 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerDied","Data":"6706495ab43453a2a6197d3d5061ad4405e2c07064ba090fd9606d34dbc80441"} Nov 21 20:11:19 crc kubenswrapper[4701]: I1121 20:11:19.632950 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerStarted","Data":"8397e937421223f44758d7ea6a9f4b7466ecd48993bb2759a8a9fa51ef368080"} Nov 21 20:11:19 crc kubenswrapper[4701]: I1121 20:11:19.632986 4701 scope.go:117] "RemoveContainer" containerID="7e1dcf3e5eec58d800cf03f8bc8e23257bc0c39c02d454c55e79aa0ba3adf8a7" Nov 21 20:11:25 crc kubenswrapper[4701]: I1121 20:11:25.360107 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-phldd"] Nov 21 20:11:25 crc kubenswrapper[4701]: E1121 20:11:25.361290 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="133f5079-c434-4c5b-9acb-739313be209a" containerName="registry-server" Nov 21 20:11:25 crc kubenswrapper[4701]: I1121 20:11:25.361307 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="133f5079-c434-4c5b-9acb-739313be209a" containerName="registry-server" Nov 21 20:11:25 crc kubenswrapper[4701]: E1121 20:11:25.361318 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="133f5079-c434-4c5b-9acb-739313be209a" containerName="extract-content" Nov 21 20:11:25 crc kubenswrapper[4701]: I1121 20:11:25.361325 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="133f5079-c434-4c5b-9acb-739313be209a" containerName="extract-content" Nov 21 20:11:25 crc kubenswrapper[4701]: E1121 20:11:25.361340 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="133f5079-c434-4c5b-9acb-739313be209a" containerName="extract-utilities" Nov 21 20:11:25 crc kubenswrapper[4701]: I1121 20:11:25.361348 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="133f5079-c434-4c5b-9acb-739313be209a" containerName="extract-utilities" Nov 21 20:11:25 crc kubenswrapper[4701]: I1121 20:11:25.361625 4701 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="133f5079-c434-4c5b-9acb-739313be209a" containerName="registry-server" Nov 21 20:11:25 crc kubenswrapper[4701]: I1121 20:11:25.363516 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-phldd" Nov 21 20:11:25 crc kubenswrapper[4701]: I1121 20:11:25.370002 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-phldd"] Nov 21 20:11:25 crc kubenswrapper[4701]: I1121 20:11:25.473260 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sd69g\" (UniqueName: \"kubernetes.io/projected/d73f3f45-fb7b-4c25-a3f6-3464b90e1024-kube-api-access-sd69g\") pod \"redhat-marketplace-phldd\" (UID: \"d73f3f45-fb7b-4c25-a3f6-3464b90e1024\") " pod="openshift-marketplace/redhat-marketplace-phldd" Nov 21 20:11:25 crc kubenswrapper[4701]: I1121 20:11:25.473685 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d73f3f45-fb7b-4c25-a3f6-3464b90e1024-catalog-content\") pod \"redhat-marketplace-phldd\" (UID: \"d73f3f45-fb7b-4c25-a3f6-3464b90e1024\") " pod="openshift-marketplace/redhat-marketplace-phldd" Nov 21 20:11:25 crc kubenswrapper[4701]: I1121 20:11:25.473713 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d73f3f45-fb7b-4c25-a3f6-3464b90e1024-utilities\") pod \"redhat-marketplace-phldd\" (UID: \"d73f3f45-fb7b-4c25-a3f6-3464b90e1024\") " pod="openshift-marketplace/redhat-marketplace-phldd" Nov 21 20:11:25 crc kubenswrapper[4701]: I1121 20:11:25.576166 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sd69g\" (UniqueName: \"kubernetes.io/projected/d73f3f45-fb7b-4c25-a3f6-3464b90e1024-kube-api-access-sd69g\") pod \"redhat-marketplace-phldd\" (UID: \"d73f3f45-fb7b-4c25-a3f6-3464b90e1024\") " pod="openshift-marketplace/redhat-marketplace-phldd" Nov 21 20:11:25 crc kubenswrapper[4701]: I1121 20:11:25.576304 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d73f3f45-fb7b-4c25-a3f6-3464b90e1024-catalog-content\") pod \"redhat-marketplace-phldd\" (UID: \"d73f3f45-fb7b-4c25-a3f6-3464b90e1024\") " pod="openshift-marketplace/redhat-marketplace-phldd" Nov 21 20:11:25 crc kubenswrapper[4701]: I1121 20:11:25.576328 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d73f3f45-fb7b-4c25-a3f6-3464b90e1024-utilities\") pod \"redhat-marketplace-phldd\" (UID: \"d73f3f45-fb7b-4c25-a3f6-3464b90e1024\") " pod="openshift-marketplace/redhat-marketplace-phldd" Nov 21 20:11:25 crc kubenswrapper[4701]: I1121 20:11:25.576792 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d73f3f45-fb7b-4c25-a3f6-3464b90e1024-utilities\") pod \"redhat-marketplace-phldd\" (UID: \"d73f3f45-fb7b-4c25-a3f6-3464b90e1024\") " pod="openshift-marketplace/redhat-marketplace-phldd" Nov 21 20:11:25 crc kubenswrapper[4701]: I1121 20:11:25.577148 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d73f3f45-fb7b-4c25-a3f6-3464b90e1024-catalog-content\") pod \"redhat-marketplace-phldd\" (UID: 
\"d73f3f45-fb7b-4c25-a3f6-3464b90e1024\") " pod="openshift-marketplace/redhat-marketplace-phldd" Nov 21 20:11:25 crc kubenswrapper[4701]: I1121 20:11:25.606737 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sd69g\" (UniqueName: \"kubernetes.io/projected/d73f3f45-fb7b-4c25-a3f6-3464b90e1024-kube-api-access-sd69g\") pod \"redhat-marketplace-phldd\" (UID: \"d73f3f45-fb7b-4c25-a3f6-3464b90e1024\") " pod="openshift-marketplace/redhat-marketplace-phldd" Nov 21 20:11:25 crc kubenswrapper[4701]: I1121 20:11:25.693339 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-phldd" Nov 21 20:11:26 crc kubenswrapper[4701]: I1121 20:11:26.234363 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-phldd"] Nov 21 20:11:26 crc kubenswrapper[4701]: I1121 20:11:26.737142 4701 generic.go:334] "Generic (PLEG): container finished" podID="d73f3f45-fb7b-4c25-a3f6-3464b90e1024" containerID="362a8d57e745c253c05b1fa0c5a0f53e58472a0b3ec80d52252fc664fe537e81" exitCode=0 Nov 21 20:11:26 crc kubenswrapper[4701]: I1121 20:11:26.737235 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-phldd" event={"ID":"d73f3f45-fb7b-4c25-a3f6-3464b90e1024","Type":"ContainerDied","Data":"362a8d57e745c253c05b1fa0c5a0f53e58472a0b3ec80d52252fc664fe537e81"} Nov 21 20:11:26 crc kubenswrapper[4701]: I1121 20:11:26.737668 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-phldd" event={"ID":"d73f3f45-fb7b-4c25-a3f6-3464b90e1024","Type":"ContainerStarted","Data":"da24d101bcf065dce0ebfec3848ab7324d8f3ee39a73847f510b2bd8204362fb"} Nov 21 20:11:27 crc kubenswrapper[4701]: I1121 20:11:27.747887 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-phldd" event={"ID":"d73f3f45-fb7b-4c25-a3f6-3464b90e1024","Type":"ContainerStarted","Data":"e1048fe7026eacc4a7c53785ccbde9cbbcb5976a737bfee4d5ac62913f15136a"} Nov 21 20:11:28 crc kubenswrapper[4701]: I1121 20:11:28.763468 4701 generic.go:334] "Generic (PLEG): container finished" podID="d73f3f45-fb7b-4c25-a3f6-3464b90e1024" containerID="e1048fe7026eacc4a7c53785ccbde9cbbcb5976a737bfee4d5ac62913f15136a" exitCode=0 Nov 21 20:11:28 crc kubenswrapper[4701]: I1121 20:11:28.763575 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-phldd" event={"ID":"d73f3f45-fb7b-4c25-a3f6-3464b90e1024","Type":"ContainerDied","Data":"e1048fe7026eacc4a7c53785ccbde9cbbcb5976a737bfee4d5ac62913f15136a"} Nov 21 20:11:29 crc kubenswrapper[4701]: I1121 20:11:29.775838 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-phldd" event={"ID":"d73f3f45-fb7b-4c25-a3f6-3464b90e1024","Type":"ContainerStarted","Data":"48daf2d1afe80d11f1546a9d2da81c90b0b36a10a4f912ffcf76710a45c74349"} Nov 21 20:11:29 crc kubenswrapper[4701]: I1121 20:11:29.802946 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-phldd" podStartSLOduration=2.264561896 podStartE2EDuration="4.802917226s" podCreationTimestamp="2025-11-21 20:11:25 +0000 UTC" firstStartedPulling="2025-11-21 20:11:26.741992865 +0000 UTC m=+4177.527132932" lastFinishedPulling="2025-11-21 20:11:29.280348225 +0000 UTC m=+4180.065488262" observedRunningTime="2025-11-21 20:11:29.798638503 +0000 UTC m=+4180.583778530" 
watchObservedRunningTime="2025-11-21 20:11:29.802917226 +0000 UTC m=+4180.588057293" Nov 21 20:11:35 crc kubenswrapper[4701]: I1121 20:11:35.694606 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-phldd" Nov 21 20:11:35 crc kubenswrapper[4701]: I1121 20:11:35.695767 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-phldd" Nov 21 20:11:35 crc kubenswrapper[4701]: I1121 20:11:35.775852 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-phldd" Nov 21 20:11:35 crc kubenswrapper[4701]: I1121 20:11:35.932918 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-phldd" Nov 21 20:11:36 crc kubenswrapper[4701]: I1121 20:11:36.035364 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-phldd"] Nov 21 20:11:37 crc kubenswrapper[4701]: I1121 20:11:37.932736 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-phldd" podUID="d73f3f45-fb7b-4c25-a3f6-3464b90e1024" containerName="registry-server" containerID="cri-o://48daf2d1afe80d11f1546a9d2da81c90b0b36a10a4f912ffcf76710a45c74349" gracePeriod=2 Nov 21 20:11:38 crc kubenswrapper[4701]: I1121 20:11:38.472221 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-phldd" Nov 21 20:11:38 crc kubenswrapper[4701]: I1121 20:11:38.565127 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d73f3f45-fb7b-4c25-a3f6-3464b90e1024-utilities\") pod \"d73f3f45-fb7b-4c25-a3f6-3464b90e1024\" (UID: \"d73f3f45-fb7b-4c25-a3f6-3464b90e1024\") " Nov 21 20:11:38 crc kubenswrapper[4701]: I1121 20:11:38.565340 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sd69g\" (UniqueName: \"kubernetes.io/projected/d73f3f45-fb7b-4c25-a3f6-3464b90e1024-kube-api-access-sd69g\") pod \"d73f3f45-fb7b-4c25-a3f6-3464b90e1024\" (UID: \"d73f3f45-fb7b-4c25-a3f6-3464b90e1024\") " Nov 21 20:11:38 crc kubenswrapper[4701]: I1121 20:11:38.565451 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d73f3f45-fb7b-4c25-a3f6-3464b90e1024-catalog-content\") pod \"d73f3f45-fb7b-4c25-a3f6-3464b90e1024\" (UID: \"d73f3f45-fb7b-4c25-a3f6-3464b90e1024\") " Nov 21 20:11:38 crc kubenswrapper[4701]: I1121 20:11:38.567052 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d73f3f45-fb7b-4c25-a3f6-3464b90e1024-utilities" (OuterVolumeSpecName: "utilities") pod "d73f3f45-fb7b-4c25-a3f6-3464b90e1024" (UID: "d73f3f45-fb7b-4c25-a3f6-3464b90e1024"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:11:38 crc kubenswrapper[4701]: I1121 20:11:38.572401 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d73f3f45-fb7b-4c25-a3f6-3464b90e1024-kube-api-access-sd69g" (OuterVolumeSpecName: "kube-api-access-sd69g") pod "d73f3f45-fb7b-4c25-a3f6-3464b90e1024" (UID: "d73f3f45-fb7b-4c25-a3f6-3464b90e1024"). InnerVolumeSpecName "kube-api-access-sd69g". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 20:11:38 crc kubenswrapper[4701]: I1121 20:11:38.585241 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d73f3f45-fb7b-4c25-a3f6-3464b90e1024-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d73f3f45-fb7b-4c25-a3f6-3464b90e1024" (UID: "d73f3f45-fb7b-4c25-a3f6-3464b90e1024"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:11:38 crc kubenswrapper[4701]: I1121 20:11:38.673094 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d73f3f45-fb7b-4c25-a3f6-3464b90e1024-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 20:11:38 crc kubenswrapper[4701]: I1121 20:11:38.673147 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d73f3f45-fb7b-4c25-a3f6-3464b90e1024-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 20:11:38 crc kubenswrapper[4701]: I1121 20:11:38.673220 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sd69g\" (UniqueName: \"kubernetes.io/projected/d73f3f45-fb7b-4c25-a3f6-3464b90e1024-kube-api-access-sd69g\") on node \"crc\" DevicePath \"\"" Nov 21 20:11:38 crc kubenswrapper[4701]: I1121 20:11:38.950441 4701 generic.go:334] "Generic (PLEG): container finished" podID="d73f3f45-fb7b-4c25-a3f6-3464b90e1024" containerID="48daf2d1afe80d11f1546a9d2da81c90b0b36a10a4f912ffcf76710a45c74349" exitCode=0 Nov 21 20:11:38 crc kubenswrapper[4701]: I1121 20:11:38.950519 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-phldd" event={"ID":"d73f3f45-fb7b-4c25-a3f6-3464b90e1024","Type":"ContainerDied","Data":"48daf2d1afe80d11f1546a9d2da81c90b0b36a10a4f912ffcf76710a45c74349"} Nov 21 20:11:38 crc kubenswrapper[4701]: I1121 20:11:38.950600 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-phldd" event={"ID":"d73f3f45-fb7b-4c25-a3f6-3464b90e1024","Type":"ContainerDied","Data":"da24d101bcf065dce0ebfec3848ab7324d8f3ee39a73847f510b2bd8204362fb"} Nov 21 20:11:38 crc kubenswrapper[4701]: I1121 20:11:38.950647 4701 scope.go:117] "RemoveContainer" containerID="48daf2d1afe80d11f1546a9d2da81c90b0b36a10a4f912ffcf76710a45c74349" Nov 21 20:11:38 crc kubenswrapper[4701]: I1121 20:11:38.950640 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-phldd" Nov 21 20:11:38 crc kubenswrapper[4701]: I1121 20:11:38.984013 4701 scope.go:117] "RemoveContainer" containerID="e1048fe7026eacc4a7c53785ccbde9cbbcb5976a737bfee4d5ac62913f15136a" Nov 21 20:11:39 crc kubenswrapper[4701]: I1121 20:11:39.032959 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-phldd"] Nov 21 20:11:39 crc kubenswrapper[4701]: I1121 20:11:39.047052 4701 scope.go:117] "RemoveContainer" containerID="362a8d57e745c253c05b1fa0c5a0f53e58472a0b3ec80d52252fc664fe537e81" Nov 21 20:11:39 crc kubenswrapper[4701]: I1121 20:11:39.049016 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-phldd"] Nov 21 20:11:39 crc kubenswrapper[4701]: I1121 20:11:39.085971 4701 scope.go:117] "RemoveContainer" containerID="48daf2d1afe80d11f1546a9d2da81c90b0b36a10a4f912ffcf76710a45c74349" Nov 21 20:11:39 crc kubenswrapper[4701]: E1121 20:11:39.086776 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"48daf2d1afe80d11f1546a9d2da81c90b0b36a10a4f912ffcf76710a45c74349\": container with ID starting with 48daf2d1afe80d11f1546a9d2da81c90b0b36a10a4f912ffcf76710a45c74349 not found: ID does not exist" containerID="48daf2d1afe80d11f1546a9d2da81c90b0b36a10a4f912ffcf76710a45c74349" Nov 21 20:11:39 crc kubenswrapper[4701]: I1121 20:11:39.086833 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48daf2d1afe80d11f1546a9d2da81c90b0b36a10a4f912ffcf76710a45c74349"} err="failed to get container status \"48daf2d1afe80d11f1546a9d2da81c90b0b36a10a4f912ffcf76710a45c74349\": rpc error: code = NotFound desc = could not find container \"48daf2d1afe80d11f1546a9d2da81c90b0b36a10a4f912ffcf76710a45c74349\": container with ID starting with 48daf2d1afe80d11f1546a9d2da81c90b0b36a10a4f912ffcf76710a45c74349 not found: ID does not exist" Nov 21 20:11:39 crc kubenswrapper[4701]: I1121 20:11:39.086871 4701 scope.go:117] "RemoveContainer" containerID="e1048fe7026eacc4a7c53785ccbde9cbbcb5976a737bfee4d5ac62913f15136a" Nov 21 20:11:39 crc kubenswrapper[4701]: E1121 20:11:39.087180 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e1048fe7026eacc4a7c53785ccbde9cbbcb5976a737bfee4d5ac62913f15136a\": container with ID starting with e1048fe7026eacc4a7c53785ccbde9cbbcb5976a737bfee4d5ac62913f15136a not found: ID does not exist" containerID="e1048fe7026eacc4a7c53785ccbde9cbbcb5976a737bfee4d5ac62913f15136a" Nov 21 20:11:39 crc kubenswrapper[4701]: I1121 20:11:39.087232 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1048fe7026eacc4a7c53785ccbde9cbbcb5976a737bfee4d5ac62913f15136a"} err="failed to get container status \"e1048fe7026eacc4a7c53785ccbde9cbbcb5976a737bfee4d5ac62913f15136a\": rpc error: code = NotFound desc = could not find container \"e1048fe7026eacc4a7c53785ccbde9cbbcb5976a737bfee4d5ac62913f15136a\": container with ID starting with e1048fe7026eacc4a7c53785ccbde9cbbcb5976a737bfee4d5ac62913f15136a not found: ID does not exist" Nov 21 20:11:39 crc kubenswrapper[4701]: I1121 20:11:39.087249 4701 scope.go:117] "RemoveContainer" containerID="362a8d57e745c253c05b1fa0c5a0f53e58472a0b3ec80d52252fc664fe537e81" Nov 21 20:11:39 crc kubenswrapper[4701]: E1121 20:11:39.087678 4701 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"362a8d57e745c253c05b1fa0c5a0f53e58472a0b3ec80d52252fc664fe537e81\": container with ID starting with 362a8d57e745c253c05b1fa0c5a0f53e58472a0b3ec80d52252fc664fe537e81 not found: ID does not exist" containerID="362a8d57e745c253c05b1fa0c5a0f53e58472a0b3ec80d52252fc664fe537e81" Nov 21 20:11:39 crc kubenswrapper[4701]: I1121 20:11:39.087707 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"362a8d57e745c253c05b1fa0c5a0f53e58472a0b3ec80d52252fc664fe537e81"} err="failed to get container status \"362a8d57e745c253c05b1fa0c5a0f53e58472a0b3ec80d52252fc664fe537e81\": rpc error: code = NotFound desc = could not find container \"362a8d57e745c253c05b1fa0c5a0f53e58472a0b3ec80d52252fc664fe537e81\": container with ID starting with 362a8d57e745c253c05b1fa0c5a0f53e58472a0b3ec80d52252fc664fe537e81 not found: ID does not exist" Nov 21 20:11:39 crc kubenswrapper[4701]: I1121 20:11:39.963645 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d73f3f45-fb7b-4c25-a3f6-3464b90e1024" path="/var/lib/kubelet/pods/d73f3f45-fb7b-4c25-a3f6-3464b90e1024/volumes" Nov 21 20:13:01 crc kubenswrapper[4701]: I1121 20:13:01.911888 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-24gww"] Nov 21 20:13:01 crc kubenswrapper[4701]: E1121 20:13:01.912909 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d73f3f45-fb7b-4c25-a3f6-3464b90e1024" containerName="extract-content" Nov 21 20:13:01 crc kubenswrapper[4701]: I1121 20:13:01.912927 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="d73f3f45-fb7b-4c25-a3f6-3464b90e1024" containerName="extract-content" Nov 21 20:13:01 crc kubenswrapper[4701]: E1121 20:13:01.912952 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d73f3f45-fb7b-4c25-a3f6-3464b90e1024" containerName="registry-server" Nov 21 20:13:01 crc kubenswrapper[4701]: I1121 20:13:01.912960 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="d73f3f45-fb7b-4c25-a3f6-3464b90e1024" containerName="registry-server" Nov 21 20:13:01 crc kubenswrapper[4701]: E1121 20:13:01.912994 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d73f3f45-fb7b-4c25-a3f6-3464b90e1024" containerName="extract-utilities" Nov 21 20:13:01 crc kubenswrapper[4701]: I1121 20:13:01.913001 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="d73f3f45-fb7b-4c25-a3f6-3464b90e1024" containerName="extract-utilities" Nov 21 20:13:01 crc kubenswrapper[4701]: I1121 20:13:01.913275 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="d73f3f45-fb7b-4c25-a3f6-3464b90e1024" containerName="registry-server" Nov 21 20:13:01 crc kubenswrapper[4701]: I1121 20:13:01.914818 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-24gww" Nov 21 20:13:01 crc kubenswrapper[4701]: I1121 20:13:01.935250 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-24gww"] Nov 21 20:13:02 crc kubenswrapper[4701]: I1121 20:13:02.063986 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce6d22f5-5a1b-455a-be47-b6e0b64c14d8-catalog-content\") pod \"community-operators-24gww\" (UID: \"ce6d22f5-5a1b-455a-be47-b6e0b64c14d8\") " pod="openshift-marketplace/community-operators-24gww" Nov 21 20:13:02 crc kubenswrapper[4701]: I1121 20:13:02.064076 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce6d22f5-5a1b-455a-be47-b6e0b64c14d8-utilities\") pod \"community-operators-24gww\" (UID: \"ce6d22f5-5a1b-455a-be47-b6e0b64c14d8\") " pod="openshift-marketplace/community-operators-24gww" Nov 21 20:13:02 crc kubenswrapper[4701]: I1121 20:13:02.064209 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-frtf2\" (UniqueName: \"kubernetes.io/projected/ce6d22f5-5a1b-455a-be47-b6e0b64c14d8-kube-api-access-frtf2\") pod \"community-operators-24gww\" (UID: \"ce6d22f5-5a1b-455a-be47-b6e0b64c14d8\") " pod="openshift-marketplace/community-operators-24gww" Nov 21 20:13:02 crc kubenswrapper[4701]: I1121 20:13:02.166062 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce6d22f5-5a1b-455a-be47-b6e0b64c14d8-utilities\") pod \"community-operators-24gww\" (UID: \"ce6d22f5-5a1b-455a-be47-b6e0b64c14d8\") " pod="openshift-marketplace/community-operators-24gww" Nov 21 20:13:02 crc kubenswrapper[4701]: I1121 20:13:02.166153 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-frtf2\" (UniqueName: \"kubernetes.io/projected/ce6d22f5-5a1b-455a-be47-b6e0b64c14d8-kube-api-access-frtf2\") pod \"community-operators-24gww\" (UID: \"ce6d22f5-5a1b-455a-be47-b6e0b64c14d8\") " pod="openshift-marketplace/community-operators-24gww" Nov 21 20:13:02 crc kubenswrapper[4701]: I1121 20:13:02.166260 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce6d22f5-5a1b-455a-be47-b6e0b64c14d8-catalog-content\") pod \"community-operators-24gww\" (UID: \"ce6d22f5-5a1b-455a-be47-b6e0b64c14d8\") " pod="openshift-marketplace/community-operators-24gww" Nov 21 20:13:02 crc kubenswrapper[4701]: I1121 20:13:02.166632 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce6d22f5-5a1b-455a-be47-b6e0b64c14d8-utilities\") pod \"community-operators-24gww\" (UID: \"ce6d22f5-5a1b-455a-be47-b6e0b64c14d8\") " pod="openshift-marketplace/community-operators-24gww" Nov 21 20:13:02 crc kubenswrapper[4701]: I1121 20:13:02.166881 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce6d22f5-5a1b-455a-be47-b6e0b64c14d8-catalog-content\") pod \"community-operators-24gww\" (UID: \"ce6d22f5-5a1b-455a-be47-b6e0b64c14d8\") " pod="openshift-marketplace/community-operators-24gww" Nov 21 20:13:02 crc kubenswrapper[4701]: I1121 20:13:02.196942 4701 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-frtf2\" (UniqueName: \"kubernetes.io/projected/ce6d22f5-5a1b-455a-be47-b6e0b64c14d8-kube-api-access-frtf2\") pod \"community-operators-24gww\" (UID: \"ce6d22f5-5a1b-455a-be47-b6e0b64c14d8\") " pod="openshift-marketplace/community-operators-24gww" Nov 21 20:13:02 crc kubenswrapper[4701]: I1121 20:13:02.264010 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-24gww" Nov 21 20:13:02 crc kubenswrapper[4701]: I1121 20:13:02.891051 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-24gww"] Nov 21 20:13:03 crc kubenswrapper[4701]: I1121 20:13:03.056388 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-24gww" event={"ID":"ce6d22f5-5a1b-455a-be47-b6e0b64c14d8","Type":"ContainerStarted","Data":"65e55ab836cd825ee02cfef683ba3a8eab846f5b2a476b465f927cdfdb32f8bb"} Nov 21 20:13:04 crc kubenswrapper[4701]: I1121 20:13:04.069928 4701 generic.go:334] "Generic (PLEG): container finished" podID="ce6d22f5-5a1b-455a-be47-b6e0b64c14d8" containerID="129d3ba53f056fed9dea4a7a1aa686beeb1e75949e0080e463231326e1a378de" exitCode=0 Nov 21 20:13:04 crc kubenswrapper[4701]: I1121 20:13:04.070014 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-24gww" event={"ID":"ce6d22f5-5a1b-455a-be47-b6e0b64c14d8","Type":"ContainerDied","Data":"129d3ba53f056fed9dea4a7a1aa686beeb1e75949e0080e463231326e1a378de"} Nov 21 20:13:05 crc kubenswrapper[4701]: I1121 20:13:05.800989 4701 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="b6432247-ed58-4dce-98d4-4267d0122151" containerName="galera" probeResult="failure" output="command timed out" Nov 21 20:13:05 crc kubenswrapper[4701]: I1121 20:13:05.801011 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-cell1-galera-0" podUID="b6432247-ed58-4dce-98d4-4267d0122151" containerName="galera" probeResult="failure" output="command timed out" Nov 21 20:13:06 crc kubenswrapper[4701]: I1121 20:13:06.108929 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-24gww" event={"ID":"ce6d22f5-5a1b-455a-be47-b6e0b64c14d8","Type":"ContainerStarted","Data":"fe35bb9b6c2e3e711964764070439a41f1e12507beb5e5ab93cef3bd2b4629e5"} Nov 21 20:13:07 crc kubenswrapper[4701]: I1121 20:13:07.141928 4701 generic.go:334] "Generic (PLEG): container finished" podID="ce6d22f5-5a1b-455a-be47-b6e0b64c14d8" containerID="fe35bb9b6c2e3e711964764070439a41f1e12507beb5e5ab93cef3bd2b4629e5" exitCode=0 Nov 21 20:13:07 crc kubenswrapper[4701]: I1121 20:13:07.142006 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-24gww" event={"ID":"ce6d22f5-5a1b-455a-be47-b6e0b64c14d8","Type":"ContainerDied","Data":"fe35bb9b6c2e3e711964764070439a41f1e12507beb5e5ab93cef3bd2b4629e5"} Nov 21 20:13:08 crc kubenswrapper[4701]: I1121 20:13:08.166411 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-24gww" event={"ID":"ce6d22f5-5a1b-455a-be47-b6e0b64c14d8","Type":"ContainerStarted","Data":"ae225e20e9e51a3fa225971c32c0e227ea8cacbbc4b904da2fd40a091c4e6203"} Nov 21 20:13:08 crc kubenswrapper[4701]: I1121 20:13:08.213309 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-24gww" 
podStartSLOduration=3.67615026 podStartE2EDuration="7.213274415s" podCreationTimestamp="2025-11-21 20:13:01 +0000 UTC" firstStartedPulling="2025-11-21 20:13:04.075937927 +0000 UTC m=+4274.861077944" lastFinishedPulling="2025-11-21 20:13:07.613062032 +0000 UTC m=+4278.398202099" observedRunningTime="2025-11-21 20:13:08.196382215 +0000 UTC m=+4278.981522282" watchObservedRunningTime="2025-11-21 20:13:08.213274415 +0000 UTC m=+4278.998414472" Nov 21 20:13:12 crc kubenswrapper[4701]: I1121 20:13:12.264634 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-24gww" Nov 21 20:13:12 crc kubenswrapper[4701]: I1121 20:13:12.265935 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-24gww" Nov 21 20:13:12 crc kubenswrapper[4701]: I1121 20:13:12.329093 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-24gww" Nov 21 20:13:13 crc kubenswrapper[4701]: I1121 20:13:13.266412 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-24gww" Nov 21 20:13:13 crc kubenswrapper[4701]: I1121 20:13:13.328321 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-24gww"] Nov 21 20:13:15 crc kubenswrapper[4701]: I1121 20:13:15.239861 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-24gww" podUID="ce6d22f5-5a1b-455a-be47-b6e0b64c14d8" containerName="registry-server" containerID="cri-o://ae225e20e9e51a3fa225971c32c0e227ea8cacbbc4b904da2fd40a091c4e6203" gracePeriod=2 Nov 21 20:13:15 crc kubenswrapper[4701]: I1121 20:13:15.777751 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-24gww" Nov 21 20:13:15 crc kubenswrapper[4701]: I1121 20:13:15.915507 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce6d22f5-5a1b-455a-be47-b6e0b64c14d8-utilities\") pod \"ce6d22f5-5a1b-455a-be47-b6e0b64c14d8\" (UID: \"ce6d22f5-5a1b-455a-be47-b6e0b64c14d8\") " Nov 21 20:13:15 crc kubenswrapper[4701]: I1121 20:13:15.915675 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce6d22f5-5a1b-455a-be47-b6e0b64c14d8-catalog-content\") pod \"ce6d22f5-5a1b-455a-be47-b6e0b64c14d8\" (UID: \"ce6d22f5-5a1b-455a-be47-b6e0b64c14d8\") " Nov 21 20:13:15 crc kubenswrapper[4701]: I1121 20:13:15.915813 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-frtf2\" (UniqueName: \"kubernetes.io/projected/ce6d22f5-5a1b-455a-be47-b6e0b64c14d8-kube-api-access-frtf2\") pod \"ce6d22f5-5a1b-455a-be47-b6e0b64c14d8\" (UID: \"ce6d22f5-5a1b-455a-be47-b6e0b64c14d8\") " Nov 21 20:13:15 crc kubenswrapper[4701]: I1121 20:13:15.917325 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ce6d22f5-5a1b-455a-be47-b6e0b64c14d8-utilities" (OuterVolumeSpecName: "utilities") pod "ce6d22f5-5a1b-455a-be47-b6e0b64c14d8" (UID: "ce6d22f5-5a1b-455a-be47-b6e0b64c14d8"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:13:15 crc kubenswrapper[4701]: I1121 20:13:15.922768 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce6d22f5-5a1b-455a-be47-b6e0b64c14d8-kube-api-access-frtf2" (OuterVolumeSpecName: "kube-api-access-frtf2") pod "ce6d22f5-5a1b-455a-be47-b6e0b64c14d8" (UID: "ce6d22f5-5a1b-455a-be47-b6e0b64c14d8"). InnerVolumeSpecName "kube-api-access-frtf2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 20:13:16 crc kubenswrapper[4701]: I1121 20:13:16.002091 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ce6d22f5-5a1b-455a-be47-b6e0b64c14d8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ce6d22f5-5a1b-455a-be47-b6e0b64c14d8" (UID: "ce6d22f5-5a1b-455a-be47-b6e0b64c14d8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:13:16 crc kubenswrapper[4701]: I1121 20:13:16.019121 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce6d22f5-5a1b-455a-be47-b6e0b64c14d8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 20:13:16 crc kubenswrapper[4701]: I1121 20:13:16.019152 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-frtf2\" (UniqueName: \"kubernetes.io/projected/ce6d22f5-5a1b-455a-be47-b6e0b64c14d8-kube-api-access-frtf2\") on node \"crc\" DevicePath \"\"" Nov 21 20:13:16 crc kubenswrapper[4701]: I1121 20:13:16.019169 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce6d22f5-5a1b-455a-be47-b6e0b64c14d8-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 20:13:16 crc kubenswrapper[4701]: I1121 20:13:16.256502 4701 generic.go:334] "Generic (PLEG): container finished" podID="ce6d22f5-5a1b-455a-be47-b6e0b64c14d8" containerID="ae225e20e9e51a3fa225971c32c0e227ea8cacbbc4b904da2fd40a091c4e6203" exitCode=0 Nov 21 20:13:16 crc kubenswrapper[4701]: I1121 20:13:16.256585 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-24gww" event={"ID":"ce6d22f5-5a1b-455a-be47-b6e0b64c14d8","Type":"ContainerDied","Data":"ae225e20e9e51a3fa225971c32c0e227ea8cacbbc4b904da2fd40a091c4e6203"} Nov 21 20:13:16 crc kubenswrapper[4701]: I1121 20:13:16.257008 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-24gww" event={"ID":"ce6d22f5-5a1b-455a-be47-b6e0b64c14d8","Type":"ContainerDied","Data":"65e55ab836cd825ee02cfef683ba3a8eab846f5b2a476b465f927cdfdb32f8bb"} Nov 21 20:13:16 crc kubenswrapper[4701]: I1121 20:13:16.257050 4701 scope.go:117] "RemoveContainer" containerID="ae225e20e9e51a3fa225971c32c0e227ea8cacbbc4b904da2fd40a091c4e6203" Nov 21 20:13:16 crc kubenswrapper[4701]: I1121 20:13:16.256621 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-24gww" Nov 21 20:13:16 crc kubenswrapper[4701]: I1121 20:13:16.294711 4701 scope.go:117] "RemoveContainer" containerID="fe35bb9b6c2e3e711964764070439a41f1e12507beb5e5ab93cef3bd2b4629e5" Nov 21 20:13:16 crc kubenswrapper[4701]: I1121 20:13:16.321221 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-24gww"] Nov 21 20:13:16 crc kubenswrapper[4701]: I1121 20:13:16.335122 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-24gww"] Nov 21 20:13:16 crc kubenswrapper[4701]: I1121 20:13:16.361704 4701 scope.go:117] "RemoveContainer" containerID="129d3ba53f056fed9dea4a7a1aa686beeb1e75949e0080e463231326e1a378de" Nov 21 20:13:16 crc kubenswrapper[4701]: I1121 20:13:16.395320 4701 scope.go:117] "RemoveContainer" containerID="ae225e20e9e51a3fa225971c32c0e227ea8cacbbc4b904da2fd40a091c4e6203" Nov 21 20:13:16 crc kubenswrapper[4701]: E1121 20:13:16.396089 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ae225e20e9e51a3fa225971c32c0e227ea8cacbbc4b904da2fd40a091c4e6203\": container with ID starting with ae225e20e9e51a3fa225971c32c0e227ea8cacbbc4b904da2fd40a091c4e6203 not found: ID does not exist" containerID="ae225e20e9e51a3fa225971c32c0e227ea8cacbbc4b904da2fd40a091c4e6203" Nov 21 20:13:16 crc kubenswrapper[4701]: I1121 20:13:16.396122 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ae225e20e9e51a3fa225971c32c0e227ea8cacbbc4b904da2fd40a091c4e6203"} err="failed to get container status \"ae225e20e9e51a3fa225971c32c0e227ea8cacbbc4b904da2fd40a091c4e6203\": rpc error: code = NotFound desc = could not find container \"ae225e20e9e51a3fa225971c32c0e227ea8cacbbc4b904da2fd40a091c4e6203\": container with ID starting with ae225e20e9e51a3fa225971c32c0e227ea8cacbbc4b904da2fd40a091c4e6203 not found: ID does not exist" Nov 21 20:13:16 crc kubenswrapper[4701]: I1121 20:13:16.396146 4701 scope.go:117] "RemoveContainer" containerID="fe35bb9b6c2e3e711964764070439a41f1e12507beb5e5ab93cef3bd2b4629e5" Nov 21 20:13:16 crc kubenswrapper[4701]: E1121 20:13:16.396805 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe35bb9b6c2e3e711964764070439a41f1e12507beb5e5ab93cef3bd2b4629e5\": container with ID starting with fe35bb9b6c2e3e711964764070439a41f1e12507beb5e5ab93cef3bd2b4629e5 not found: ID does not exist" containerID="fe35bb9b6c2e3e711964764070439a41f1e12507beb5e5ab93cef3bd2b4629e5" Nov 21 20:13:16 crc kubenswrapper[4701]: I1121 20:13:16.396827 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe35bb9b6c2e3e711964764070439a41f1e12507beb5e5ab93cef3bd2b4629e5"} err="failed to get container status \"fe35bb9b6c2e3e711964764070439a41f1e12507beb5e5ab93cef3bd2b4629e5\": rpc error: code = NotFound desc = could not find container \"fe35bb9b6c2e3e711964764070439a41f1e12507beb5e5ab93cef3bd2b4629e5\": container with ID starting with fe35bb9b6c2e3e711964764070439a41f1e12507beb5e5ab93cef3bd2b4629e5 not found: ID does not exist" Nov 21 20:13:16 crc kubenswrapper[4701]: I1121 20:13:16.396843 4701 scope.go:117] "RemoveContainer" containerID="129d3ba53f056fed9dea4a7a1aa686beeb1e75949e0080e463231326e1a378de" Nov 21 20:13:16 crc kubenswrapper[4701]: E1121 20:13:16.397380 4701 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"129d3ba53f056fed9dea4a7a1aa686beeb1e75949e0080e463231326e1a378de\": container with ID starting with 129d3ba53f056fed9dea4a7a1aa686beeb1e75949e0080e463231326e1a378de not found: ID does not exist" containerID="129d3ba53f056fed9dea4a7a1aa686beeb1e75949e0080e463231326e1a378de" Nov 21 20:13:16 crc kubenswrapper[4701]: I1121 20:13:16.397404 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"129d3ba53f056fed9dea4a7a1aa686beeb1e75949e0080e463231326e1a378de"} err="failed to get container status \"129d3ba53f056fed9dea4a7a1aa686beeb1e75949e0080e463231326e1a378de\": rpc error: code = NotFound desc = could not find container \"129d3ba53f056fed9dea4a7a1aa686beeb1e75949e0080e463231326e1a378de\": container with ID starting with 129d3ba53f056fed9dea4a7a1aa686beeb1e75949e0080e463231326e1a378de not found: ID does not exist" Nov 21 20:13:17 crc kubenswrapper[4701]: I1121 20:13:17.976472 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ce6d22f5-5a1b-455a-be47-b6e0b64c14d8" path="/var/lib/kubelet/pods/ce6d22f5-5a1b-455a-be47-b6e0b64c14d8/volumes" Nov 21 20:13:48 crc kubenswrapper[4701]: I1121 20:13:48.613486 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 20:13:48 crc kubenswrapper[4701]: I1121 20:13:48.614441 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 20:14:18 crc kubenswrapper[4701]: I1121 20:14:18.613307 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 20:14:18 crc kubenswrapper[4701]: I1121 20:14:18.614062 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 20:14:48 crc kubenswrapper[4701]: I1121 20:14:48.613547 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 20:14:48 crc kubenswrapper[4701]: I1121 20:14:48.614562 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 20:14:48 crc kubenswrapper[4701]: I1121 20:14:48.614637 4701 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" 
status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" Nov 21 20:14:48 crc kubenswrapper[4701]: I1121 20:14:48.615863 4701 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8397e937421223f44758d7ea6a9f4b7466ecd48993bb2759a8a9fa51ef368080"} pod="openshift-machine-config-operator/machine-config-daemon-tbszf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 20:14:48 crc kubenswrapper[4701]: I1121 20:14:48.615975 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" containerID="cri-o://8397e937421223f44758d7ea6a9f4b7466ecd48993bb2759a8a9fa51ef368080" gracePeriod=600 Nov 21 20:14:48 crc kubenswrapper[4701]: E1121 20:14:48.747446 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:14:49 crc kubenswrapper[4701]: I1121 20:14:49.467599 4701 generic.go:334] "Generic (PLEG): container finished" podID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerID="8397e937421223f44758d7ea6a9f4b7466ecd48993bb2759a8a9fa51ef368080" exitCode=0 Nov 21 20:14:49 crc kubenswrapper[4701]: I1121 20:14:49.467684 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerDied","Data":"8397e937421223f44758d7ea6a9f4b7466ecd48993bb2759a8a9fa51ef368080"} Nov 21 20:14:49 crc kubenswrapper[4701]: I1121 20:14:49.468121 4701 scope.go:117] "RemoveContainer" containerID="6706495ab43453a2a6197d3d5061ad4405e2c07064ba090fd9606d34dbc80441" Nov 21 20:14:49 crc kubenswrapper[4701]: I1121 20:14:49.469348 4701 scope.go:117] "RemoveContainer" containerID="8397e937421223f44758d7ea6a9f4b7466ecd48993bb2759a8a9fa51ef368080" Nov 21 20:14:49 crc kubenswrapper[4701]: E1121 20:14:49.470000 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:14:53 crc kubenswrapper[4701]: I1121 20:14:53.622349 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-tjc97"] Nov 21 20:14:53 crc kubenswrapper[4701]: E1121 20:14:53.624891 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce6d22f5-5a1b-455a-be47-b6e0b64c14d8" containerName="registry-server" Nov 21 20:14:53 crc kubenswrapper[4701]: I1121 20:14:53.624914 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce6d22f5-5a1b-455a-be47-b6e0b64c14d8" containerName="registry-server" Nov 21 20:14:53 crc kubenswrapper[4701]: E1121 20:14:53.624945 4701 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="ce6d22f5-5a1b-455a-be47-b6e0b64c14d8" containerName="extract-utilities" Nov 21 20:14:53 crc kubenswrapper[4701]: I1121 20:14:53.624951 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce6d22f5-5a1b-455a-be47-b6e0b64c14d8" containerName="extract-utilities" Nov 21 20:14:53 crc kubenswrapper[4701]: E1121 20:14:53.624959 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce6d22f5-5a1b-455a-be47-b6e0b64c14d8" containerName="extract-content" Nov 21 20:14:53 crc kubenswrapper[4701]: I1121 20:14:53.624966 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce6d22f5-5a1b-455a-be47-b6e0b64c14d8" containerName="extract-content" Nov 21 20:14:53 crc kubenswrapper[4701]: I1121 20:14:53.625159 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce6d22f5-5a1b-455a-be47-b6e0b64c14d8" containerName="registry-server" Nov 21 20:14:53 crc kubenswrapper[4701]: I1121 20:14:53.627021 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tjc97" Nov 21 20:14:53 crc kubenswrapper[4701]: I1121 20:14:53.634380 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tjc97"] Nov 21 20:14:53 crc kubenswrapper[4701]: I1121 20:14:53.777519 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wfsgg\" (UniqueName: \"kubernetes.io/projected/d5462eeb-0a00-423c-8409-64a02f589ed0-kube-api-access-wfsgg\") pod \"certified-operators-tjc97\" (UID: \"d5462eeb-0a00-423c-8409-64a02f589ed0\") " pod="openshift-marketplace/certified-operators-tjc97" Nov 21 20:14:53 crc kubenswrapper[4701]: I1121 20:14:53.778450 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d5462eeb-0a00-423c-8409-64a02f589ed0-catalog-content\") pod \"certified-operators-tjc97\" (UID: \"d5462eeb-0a00-423c-8409-64a02f589ed0\") " pod="openshift-marketplace/certified-operators-tjc97" Nov 21 20:14:53 crc kubenswrapper[4701]: I1121 20:14:53.778571 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d5462eeb-0a00-423c-8409-64a02f589ed0-utilities\") pod \"certified-operators-tjc97\" (UID: \"d5462eeb-0a00-423c-8409-64a02f589ed0\") " pod="openshift-marketplace/certified-operators-tjc97" Nov 21 20:14:53 crc kubenswrapper[4701]: I1121 20:14:53.881999 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d5462eeb-0a00-423c-8409-64a02f589ed0-catalog-content\") pod \"certified-operators-tjc97\" (UID: \"d5462eeb-0a00-423c-8409-64a02f589ed0\") " pod="openshift-marketplace/certified-operators-tjc97" Nov 21 20:14:53 crc kubenswrapper[4701]: I1121 20:14:53.882121 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d5462eeb-0a00-423c-8409-64a02f589ed0-utilities\") pod \"certified-operators-tjc97\" (UID: \"d5462eeb-0a00-423c-8409-64a02f589ed0\") " pod="openshift-marketplace/certified-operators-tjc97" Nov 21 20:14:53 crc kubenswrapper[4701]: I1121 20:14:53.882252 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wfsgg\" (UniqueName: \"kubernetes.io/projected/d5462eeb-0a00-423c-8409-64a02f589ed0-kube-api-access-wfsgg\") pod 
\"certified-operators-tjc97\" (UID: \"d5462eeb-0a00-423c-8409-64a02f589ed0\") " pod="openshift-marketplace/certified-operators-tjc97" Nov 21 20:14:53 crc kubenswrapper[4701]: I1121 20:14:53.882478 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d5462eeb-0a00-423c-8409-64a02f589ed0-catalog-content\") pod \"certified-operators-tjc97\" (UID: \"d5462eeb-0a00-423c-8409-64a02f589ed0\") " pod="openshift-marketplace/certified-operators-tjc97" Nov 21 20:14:53 crc kubenswrapper[4701]: I1121 20:14:53.882652 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d5462eeb-0a00-423c-8409-64a02f589ed0-utilities\") pod \"certified-operators-tjc97\" (UID: \"d5462eeb-0a00-423c-8409-64a02f589ed0\") " pod="openshift-marketplace/certified-operators-tjc97" Nov 21 20:14:53 crc kubenswrapper[4701]: I1121 20:14:53.915300 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wfsgg\" (UniqueName: \"kubernetes.io/projected/d5462eeb-0a00-423c-8409-64a02f589ed0-kube-api-access-wfsgg\") pod \"certified-operators-tjc97\" (UID: \"d5462eeb-0a00-423c-8409-64a02f589ed0\") " pod="openshift-marketplace/certified-operators-tjc97" Nov 21 20:14:53 crc kubenswrapper[4701]: I1121 20:14:53.960075 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tjc97" Nov 21 20:14:54 crc kubenswrapper[4701]: I1121 20:14:54.500661 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tjc97"] Nov 21 20:14:54 crc kubenswrapper[4701]: I1121 20:14:54.537327 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tjc97" event={"ID":"d5462eeb-0a00-423c-8409-64a02f589ed0","Type":"ContainerStarted","Data":"c6dce8f7b58111a21ab67aa5e43f96591c425d45b30dccd3c33c9a27a6a99603"} Nov 21 20:14:55 crc kubenswrapper[4701]: I1121 20:14:55.553699 4701 generic.go:334] "Generic (PLEG): container finished" podID="d5462eeb-0a00-423c-8409-64a02f589ed0" containerID="8295feaaa58bc1335854205cd656b00f420ffb9b8557caf6edddaa88bd3bd724" exitCode=0 Nov 21 20:14:55 crc kubenswrapper[4701]: I1121 20:14:55.553788 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tjc97" event={"ID":"d5462eeb-0a00-423c-8409-64a02f589ed0","Type":"ContainerDied","Data":"8295feaaa58bc1335854205cd656b00f420ffb9b8557caf6edddaa88bd3bd724"} Nov 21 20:14:55 crc kubenswrapper[4701]: I1121 20:14:55.558686 4701 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 20:14:56 crc kubenswrapper[4701]: I1121 20:14:56.567575 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tjc97" event={"ID":"d5462eeb-0a00-423c-8409-64a02f589ed0","Type":"ContainerStarted","Data":"dae44732e5920d87ab89e00b4a73f2149d601e572160b9ed0fd90fa0cbf97bd7"} Nov 21 20:14:58 crc kubenswrapper[4701]: I1121 20:14:58.631722 4701 generic.go:334] "Generic (PLEG): container finished" podID="d5462eeb-0a00-423c-8409-64a02f589ed0" containerID="dae44732e5920d87ab89e00b4a73f2149d601e572160b9ed0fd90fa0cbf97bd7" exitCode=0 Nov 21 20:14:58 crc kubenswrapper[4701]: I1121 20:14:58.632000 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tjc97" 
event={"ID":"d5462eeb-0a00-423c-8409-64a02f589ed0","Type":"ContainerDied","Data":"dae44732e5920d87ab89e00b4a73f2149d601e572160b9ed0fd90fa0cbf97bd7"} Nov 21 20:14:59 crc kubenswrapper[4701]: I1121 20:14:59.666177 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tjc97" event={"ID":"d5462eeb-0a00-423c-8409-64a02f589ed0","Type":"ContainerStarted","Data":"8005d3e1965e8c27f6e4c7dbbaeb109f70d0b09c098eff1f1f0e7adb7ccd8275"} Nov 21 20:14:59 crc kubenswrapper[4701]: I1121 20:14:59.691680 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-tjc97" podStartSLOduration=3.054249823 podStartE2EDuration="6.691650223s" podCreationTimestamp="2025-11-21 20:14:53 +0000 UTC" firstStartedPulling="2025-11-21 20:14:55.557718105 +0000 UTC m=+4386.342858172" lastFinishedPulling="2025-11-21 20:14:59.195118535 +0000 UTC m=+4389.980258572" observedRunningTime="2025-11-21 20:14:59.68929688 +0000 UTC m=+4390.474436907" watchObservedRunningTime="2025-11-21 20:14:59.691650223 +0000 UTC m=+4390.476790290" Nov 21 20:15:00 crc kubenswrapper[4701]: I1121 20:15:00.177237 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395935-cbww2"] Nov 21 20:15:00 crc kubenswrapper[4701]: I1121 20:15:00.179156 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395935-cbww2" Nov 21 20:15:00 crc kubenswrapper[4701]: I1121 20:15:00.181845 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 21 20:15:00 crc kubenswrapper[4701]: I1121 20:15:00.182560 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 21 20:15:00 crc kubenswrapper[4701]: I1121 20:15:00.207611 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395935-cbww2"] Nov 21 20:15:00 crc kubenswrapper[4701]: I1121 20:15:00.350318 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/55b0dc90-c132-4aab-abcf-a97463032b37-config-volume\") pod \"collect-profiles-29395935-cbww2\" (UID: \"55b0dc90-c132-4aab-abcf-a97463032b37\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395935-cbww2" Nov 21 20:15:00 crc kubenswrapper[4701]: I1121 20:15:00.350443 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-58hwh\" (UniqueName: \"kubernetes.io/projected/55b0dc90-c132-4aab-abcf-a97463032b37-kube-api-access-58hwh\") pod \"collect-profiles-29395935-cbww2\" (UID: \"55b0dc90-c132-4aab-abcf-a97463032b37\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395935-cbww2" Nov 21 20:15:00 crc kubenswrapper[4701]: I1121 20:15:00.350857 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/55b0dc90-c132-4aab-abcf-a97463032b37-secret-volume\") pod \"collect-profiles-29395935-cbww2\" (UID: \"55b0dc90-c132-4aab-abcf-a97463032b37\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395935-cbww2" Nov 21 20:15:00 crc kubenswrapper[4701]: I1121 20:15:00.453262 4701 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/55b0dc90-c132-4aab-abcf-a97463032b37-config-volume\") pod \"collect-profiles-29395935-cbww2\" (UID: \"55b0dc90-c132-4aab-abcf-a97463032b37\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395935-cbww2" Nov 21 20:15:00 crc kubenswrapper[4701]: I1121 20:15:00.453327 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-58hwh\" (UniqueName: \"kubernetes.io/projected/55b0dc90-c132-4aab-abcf-a97463032b37-kube-api-access-58hwh\") pod \"collect-profiles-29395935-cbww2\" (UID: \"55b0dc90-c132-4aab-abcf-a97463032b37\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395935-cbww2" Nov 21 20:15:00 crc kubenswrapper[4701]: I1121 20:15:00.453401 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/55b0dc90-c132-4aab-abcf-a97463032b37-secret-volume\") pod \"collect-profiles-29395935-cbww2\" (UID: \"55b0dc90-c132-4aab-abcf-a97463032b37\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395935-cbww2" Nov 21 20:15:00 crc kubenswrapper[4701]: I1121 20:15:00.456293 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/55b0dc90-c132-4aab-abcf-a97463032b37-config-volume\") pod \"collect-profiles-29395935-cbww2\" (UID: \"55b0dc90-c132-4aab-abcf-a97463032b37\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395935-cbww2" Nov 21 20:15:00 crc kubenswrapper[4701]: I1121 20:15:00.464000 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/55b0dc90-c132-4aab-abcf-a97463032b37-secret-volume\") pod \"collect-profiles-29395935-cbww2\" (UID: \"55b0dc90-c132-4aab-abcf-a97463032b37\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395935-cbww2" Nov 21 20:15:00 crc kubenswrapper[4701]: I1121 20:15:00.474966 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-58hwh\" (UniqueName: \"kubernetes.io/projected/55b0dc90-c132-4aab-abcf-a97463032b37-kube-api-access-58hwh\") pod \"collect-profiles-29395935-cbww2\" (UID: \"55b0dc90-c132-4aab-abcf-a97463032b37\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395935-cbww2" Nov 21 20:15:00 crc kubenswrapper[4701]: I1121 20:15:00.517272 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395935-cbww2" Nov 21 20:15:01 crc kubenswrapper[4701]: I1121 20:15:01.029101 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395935-cbww2"] Nov 21 20:15:01 crc kubenswrapper[4701]: W1121 20:15:01.031855 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod55b0dc90_c132_4aab_abcf_a97463032b37.slice/crio-2f5362c0dc4f3d664abb1760ecb93c3f6ddcc952a47ddfdf2d181ff87b376e7d WatchSource:0}: Error finding container 2f5362c0dc4f3d664abb1760ecb93c3f6ddcc952a47ddfdf2d181ff87b376e7d: Status 404 returned error can't find the container with id 2f5362c0dc4f3d664abb1760ecb93c3f6ddcc952a47ddfdf2d181ff87b376e7d Nov 21 20:15:01 crc kubenswrapper[4701]: I1121 20:15:01.692175 4701 generic.go:334] "Generic (PLEG): container finished" podID="55b0dc90-c132-4aab-abcf-a97463032b37" containerID="db1cbff889c42d474fabdc35d545b028e81885be4ae49711d71e21701e428736" exitCode=0 Nov 21 20:15:01 crc kubenswrapper[4701]: I1121 20:15:01.692383 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395935-cbww2" event={"ID":"55b0dc90-c132-4aab-abcf-a97463032b37","Type":"ContainerDied","Data":"db1cbff889c42d474fabdc35d545b028e81885be4ae49711d71e21701e428736"} Nov 21 20:15:01 crc kubenswrapper[4701]: I1121 20:15:01.692657 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395935-cbww2" event={"ID":"55b0dc90-c132-4aab-abcf-a97463032b37","Type":"ContainerStarted","Data":"2f5362c0dc4f3d664abb1760ecb93c3f6ddcc952a47ddfdf2d181ff87b376e7d"} Nov 21 20:15:03 crc kubenswrapper[4701]: I1121 20:15:03.163261 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395935-cbww2" Nov 21 20:15:03 crc kubenswrapper[4701]: I1121 20:15:03.321005 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/55b0dc90-c132-4aab-abcf-a97463032b37-config-volume\") pod \"55b0dc90-c132-4aab-abcf-a97463032b37\" (UID: \"55b0dc90-c132-4aab-abcf-a97463032b37\") " Nov 21 20:15:03 crc kubenswrapper[4701]: I1121 20:15:03.321464 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-58hwh\" (UniqueName: \"kubernetes.io/projected/55b0dc90-c132-4aab-abcf-a97463032b37-kube-api-access-58hwh\") pod \"55b0dc90-c132-4aab-abcf-a97463032b37\" (UID: \"55b0dc90-c132-4aab-abcf-a97463032b37\") " Nov 21 20:15:03 crc kubenswrapper[4701]: I1121 20:15:03.321554 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/55b0dc90-c132-4aab-abcf-a97463032b37-secret-volume\") pod \"55b0dc90-c132-4aab-abcf-a97463032b37\" (UID: \"55b0dc90-c132-4aab-abcf-a97463032b37\") " Nov 21 20:15:03 crc kubenswrapper[4701]: I1121 20:15:03.321999 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/55b0dc90-c132-4aab-abcf-a97463032b37-config-volume" (OuterVolumeSpecName: "config-volume") pod "55b0dc90-c132-4aab-abcf-a97463032b37" (UID: "55b0dc90-c132-4aab-abcf-a97463032b37"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 20:15:03 crc kubenswrapper[4701]: I1121 20:15:03.322620 4701 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/55b0dc90-c132-4aab-abcf-a97463032b37-config-volume\") on node \"crc\" DevicePath \"\"" Nov 21 20:15:03 crc kubenswrapper[4701]: I1121 20:15:03.334264 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55b0dc90-c132-4aab-abcf-a97463032b37-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "55b0dc90-c132-4aab-abcf-a97463032b37" (UID: "55b0dc90-c132-4aab-abcf-a97463032b37"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 20:15:03 crc kubenswrapper[4701]: I1121 20:15:03.335916 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55b0dc90-c132-4aab-abcf-a97463032b37-kube-api-access-58hwh" (OuterVolumeSpecName: "kube-api-access-58hwh") pod "55b0dc90-c132-4aab-abcf-a97463032b37" (UID: "55b0dc90-c132-4aab-abcf-a97463032b37"). InnerVolumeSpecName "kube-api-access-58hwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 20:15:03 crc kubenswrapper[4701]: I1121 20:15:03.425305 4701 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/55b0dc90-c132-4aab-abcf-a97463032b37-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 21 20:15:03 crc kubenswrapper[4701]: I1121 20:15:03.425361 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-58hwh\" (UniqueName: \"kubernetes.io/projected/55b0dc90-c132-4aab-abcf-a97463032b37-kube-api-access-58hwh\") on node \"crc\" DevicePath \"\"" Nov 21 20:15:03 crc kubenswrapper[4701]: I1121 20:15:03.720158 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395935-cbww2" event={"ID":"55b0dc90-c132-4aab-abcf-a97463032b37","Type":"ContainerDied","Data":"2f5362c0dc4f3d664abb1760ecb93c3f6ddcc952a47ddfdf2d181ff87b376e7d"} Nov 21 20:15:03 crc kubenswrapper[4701]: I1121 20:15:03.720231 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2f5362c0dc4f3d664abb1760ecb93c3f6ddcc952a47ddfdf2d181ff87b376e7d" Nov 21 20:15:03 crc kubenswrapper[4701]: I1121 20:15:03.720252 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395935-cbww2" Nov 21 20:15:03 crc kubenswrapper[4701]: I1121 20:15:03.952318 4701 scope.go:117] "RemoveContainer" containerID="8397e937421223f44758d7ea6a9f4b7466ecd48993bb2759a8a9fa51ef368080" Nov 21 20:15:03 crc kubenswrapper[4701]: E1121 20:15:03.952722 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:15:03 crc kubenswrapper[4701]: I1121 20:15:03.978615 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-tjc97" Nov 21 20:15:03 crc kubenswrapper[4701]: I1121 20:15:03.978671 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-tjc97" Nov 21 20:15:04 crc kubenswrapper[4701]: I1121 20:15:04.252117 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395890-j7xwt"] Nov 21 20:15:04 crc kubenswrapper[4701]: I1121 20:15:04.259978 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395890-j7xwt"] Nov 21 20:15:05 crc kubenswrapper[4701]: I1121 20:15:05.034848 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-tjc97" podUID="d5462eeb-0a00-423c-8409-64a02f589ed0" containerName="registry-server" probeResult="failure" output=< Nov 21 20:15:05 crc kubenswrapper[4701]: timeout: failed to connect service ":50051" within 1s Nov 21 20:15:05 crc kubenswrapper[4701]: > Nov 21 20:15:05 crc kubenswrapper[4701]: I1121 20:15:05.968727 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="379b0a05-e937-47ce-90c4-5fea3738796b" path="/var/lib/kubelet/pods/379b0a05-e937-47ce-90c4-5fea3738796b/volumes" Nov 21 20:15:12 crc kubenswrapper[4701]: I1121 20:15:12.012489 4701 scope.go:117] "RemoveContainer" containerID="401821c614254f25c4ebb5efe6d53dc187b9a615601207ba2bf952d6145067be" Nov 21 20:15:14 crc kubenswrapper[4701]: I1121 20:15:14.021690 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-tjc97" Nov 21 20:15:14 crc kubenswrapper[4701]: I1121 20:15:14.097773 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-tjc97" Nov 21 20:15:14 crc kubenswrapper[4701]: I1121 20:15:14.260831 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tjc97"] Nov 21 20:15:15 crc kubenswrapper[4701]: I1121 20:15:15.866066 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-tjc97" podUID="d5462eeb-0a00-423c-8409-64a02f589ed0" containerName="registry-server" containerID="cri-o://8005d3e1965e8c27f6e4c7dbbaeb109f70d0b09c098eff1f1f0e7adb7ccd8275" gracePeriod=2 Nov 21 20:15:16 crc kubenswrapper[4701]: I1121 20:15:16.428025 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-tjc97" Nov 21 20:15:16 crc kubenswrapper[4701]: I1121 20:15:16.555192 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d5462eeb-0a00-423c-8409-64a02f589ed0-utilities\") pod \"d5462eeb-0a00-423c-8409-64a02f589ed0\" (UID: \"d5462eeb-0a00-423c-8409-64a02f589ed0\") " Nov 21 20:15:16 crc kubenswrapper[4701]: I1121 20:15:16.555253 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d5462eeb-0a00-423c-8409-64a02f589ed0-catalog-content\") pod \"d5462eeb-0a00-423c-8409-64a02f589ed0\" (UID: \"d5462eeb-0a00-423c-8409-64a02f589ed0\") " Nov 21 20:15:16 crc kubenswrapper[4701]: I1121 20:15:16.555402 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wfsgg\" (UniqueName: \"kubernetes.io/projected/d5462eeb-0a00-423c-8409-64a02f589ed0-kube-api-access-wfsgg\") pod \"d5462eeb-0a00-423c-8409-64a02f589ed0\" (UID: \"d5462eeb-0a00-423c-8409-64a02f589ed0\") " Nov 21 20:15:16 crc kubenswrapper[4701]: I1121 20:15:16.564767 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d5462eeb-0a00-423c-8409-64a02f589ed0-utilities" (OuterVolumeSpecName: "utilities") pod "d5462eeb-0a00-423c-8409-64a02f589ed0" (UID: "d5462eeb-0a00-423c-8409-64a02f589ed0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:15:16 crc kubenswrapper[4701]: I1121 20:15:16.568911 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d5462eeb-0a00-423c-8409-64a02f589ed0-kube-api-access-wfsgg" (OuterVolumeSpecName: "kube-api-access-wfsgg") pod "d5462eeb-0a00-423c-8409-64a02f589ed0" (UID: "d5462eeb-0a00-423c-8409-64a02f589ed0"). InnerVolumeSpecName "kube-api-access-wfsgg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 20:15:16 crc kubenswrapper[4701]: I1121 20:15:16.657915 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d5462eeb-0a00-423c-8409-64a02f589ed0-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 20:15:16 crc kubenswrapper[4701]: I1121 20:15:16.657954 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wfsgg\" (UniqueName: \"kubernetes.io/projected/d5462eeb-0a00-423c-8409-64a02f589ed0-kube-api-access-wfsgg\") on node \"crc\" DevicePath \"\"" Nov 21 20:15:16 crc kubenswrapper[4701]: I1121 20:15:16.658958 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d5462eeb-0a00-423c-8409-64a02f589ed0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d5462eeb-0a00-423c-8409-64a02f589ed0" (UID: "d5462eeb-0a00-423c-8409-64a02f589ed0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:15:16 crc kubenswrapper[4701]: I1121 20:15:16.759496 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d5462eeb-0a00-423c-8409-64a02f589ed0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 20:15:16 crc kubenswrapper[4701]: I1121 20:15:16.884218 4701 generic.go:334] "Generic (PLEG): container finished" podID="d5462eeb-0a00-423c-8409-64a02f589ed0" containerID="8005d3e1965e8c27f6e4c7dbbaeb109f70d0b09c098eff1f1f0e7adb7ccd8275" exitCode=0 Nov 21 20:15:16 crc kubenswrapper[4701]: I1121 20:15:16.884279 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tjc97" event={"ID":"d5462eeb-0a00-423c-8409-64a02f589ed0","Type":"ContainerDied","Data":"8005d3e1965e8c27f6e4c7dbbaeb109f70d0b09c098eff1f1f0e7adb7ccd8275"} Nov 21 20:15:16 crc kubenswrapper[4701]: I1121 20:15:16.884316 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tjc97" event={"ID":"d5462eeb-0a00-423c-8409-64a02f589ed0","Type":"ContainerDied","Data":"c6dce8f7b58111a21ab67aa5e43f96591c425d45b30dccd3c33c9a27a6a99603"} Nov 21 20:15:16 crc kubenswrapper[4701]: I1121 20:15:16.884341 4701 scope.go:117] "RemoveContainer" containerID="8005d3e1965e8c27f6e4c7dbbaeb109f70d0b09c098eff1f1f0e7adb7ccd8275" Nov 21 20:15:16 crc kubenswrapper[4701]: I1121 20:15:16.884534 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tjc97" Nov 21 20:15:16 crc kubenswrapper[4701]: I1121 20:15:16.914678 4701 scope.go:117] "RemoveContainer" containerID="dae44732e5920d87ab89e00b4a73f2149d601e572160b9ed0fd90fa0cbf97bd7" Nov 21 20:15:16 crc kubenswrapper[4701]: I1121 20:15:16.943824 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tjc97"] Nov 21 20:15:16 crc kubenswrapper[4701]: I1121 20:15:16.952406 4701 scope.go:117] "RemoveContainer" containerID="8397e937421223f44758d7ea6a9f4b7466ecd48993bb2759a8a9fa51ef368080" Nov 21 20:15:16 crc kubenswrapper[4701]: E1121 20:15:16.953698 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:15:16 crc kubenswrapper[4701]: I1121 20:15:16.952558 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-tjc97"] Nov 21 20:15:16 crc kubenswrapper[4701]: I1121 20:15:16.971455 4701 scope.go:117] "RemoveContainer" containerID="8295feaaa58bc1335854205cd656b00f420ffb9b8557caf6edddaa88bd3bd724" Nov 21 20:15:17 crc kubenswrapper[4701]: I1121 20:15:17.028941 4701 scope.go:117] "RemoveContainer" containerID="8005d3e1965e8c27f6e4c7dbbaeb109f70d0b09c098eff1f1f0e7adb7ccd8275" Nov 21 20:15:17 crc kubenswrapper[4701]: E1121 20:15:17.030128 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8005d3e1965e8c27f6e4c7dbbaeb109f70d0b09c098eff1f1f0e7adb7ccd8275\": container with ID starting with 8005d3e1965e8c27f6e4c7dbbaeb109f70d0b09c098eff1f1f0e7adb7ccd8275 not found: ID does not exist" 
containerID="8005d3e1965e8c27f6e4c7dbbaeb109f70d0b09c098eff1f1f0e7adb7ccd8275" Nov 21 20:15:17 crc kubenswrapper[4701]: I1121 20:15:17.030437 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8005d3e1965e8c27f6e4c7dbbaeb109f70d0b09c098eff1f1f0e7adb7ccd8275"} err="failed to get container status \"8005d3e1965e8c27f6e4c7dbbaeb109f70d0b09c098eff1f1f0e7adb7ccd8275\": rpc error: code = NotFound desc = could not find container \"8005d3e1965e8c27f6e4c7dbbaeb109f70d0b09c098eff1f1f0e7adb7ccd8275\": container with ID starting with 8005d3e1965e8c27f6e4c7dbbaeb109f70d0b09c098eff1f1f0e7adb7ccd8275 not found: ID does not exist" Nov 21 20:15:17 crc kubenswrapper[4701]: I1121 20:15:17.030471 4701 scope.go:117] "RemoveContainer" containerID="dae44732e5920d87ab89e00b4a73f2149d601e572160b9ed0fd90fa0cbf97bd7" Nov 21 20:15:17 crc kubenswrapper[4701]: E1121 20:15:17.031339 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dae44732e5920d87ab89e00b4a73f2149d601e572160b9ed0fd90fa0cbf97bd7\": container with ID starting with dae44732e5920d87ab89e00b4a73f2149d601e572160b9ed0fd90fa0cbf97bd7 not found: ID does not exist" containerID="dae44732e5920d87ab89e00b4a73f2149d601e572160b9ed0fd90fa0cbf97bd7" Nov 21 20:15:17 crc kubenswrapper[4701]: I1121 20:15:17.031368 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dae44732e5920d87ab89e00b4a73f2149d601e572160b9ed0fd90fa0cbf97bd7"} err="failed to get container status \"dae44732e5920d87ab89e00b4a73f2149d601e572160b9ed0fd90fa0cbf97bd7\": rpc error: code = NotFound desc = could not find container \"dae44732e5920d87ab89e00b4a73f2149d601e572160b9ed0fd90fa0cbf97bd7\": container with ID starting with dae44732e5920d87ab89e00b4a73f2149d601e572160b9ed0fd90fa0cbf97bd7 not found: ID does not exist" Nov 21 20:15:17 crc kubenswrapper[4701]: I1121 20:15:17.031600 4701 scope.go:117] "RemoveContainer" containerID="8295feaaa58bc1335854205cd656b00f420ffb9b8557caf6edddaa88bd3bd724" Nov 21 20:15:17 crc kubenswrapper[4701]: E1121 20:15:17.032236 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8295feaaa58bc1335854205cd656b00f420ffb9b8557caf6edddaa88bd3bd724\": container with ID starting with 8295feaaa58bc1335854205cd656b00f420ffb9b8557caf6edddaa88bd3bd724 not found: ID does not exist" containerID="8295feaaa58bc1335854205cd656b00f420ffb9b8557caf6edddaa88bd3bd724" Nov 21 20:15:17 crc kubenswrapper[4701]: I1121 20:15:17.032276 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8295feaaa58bc1335854205cd656b00f420ffb9b8557caf6edddaa88bd3bd724"} err="failed to get container status \"8295feaaa58bc1335854205cd656b00f420ffb9b8557caf6edddaa88bd3bd724\": rpc error: code = NotFound desc = could not find container \"8295feaaa58bc1335854205cd656b00f420ffb9b8557caf6edddaa88bd3bd724\": container with ID starting with 8295feaaa58bc1335854205cd656b00f420ffb9b8557caf6edddaa88bd3bd724 not found: ID does not exist" Nov 21 20:15:17 crc kubenswrapper[4701]: I1121 20:15:17.970194 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d5462eeb-0a00-423c-8409-64a02f589ed0" path="/var/lib/kubelet/pods/d5462eeb-0a00-423c-8409-64a02f589ed0/volumes" Nov 21 20:15:28 crc kubenswrapper[4701]: I1121 20:15:28.951947 4701 scope.go:117] "RemoveContainer" 
containerID="8397e937421223f44758d7ea6a9f4b7466ecd48993bb2759a8a9fa51ef368080" Nov 21 20:15:28 crc kubenswrapper[4701]: E1121 20:15:28.953010 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:15:41 crc kubenswrapper[4701]: I1121 20:15:41.952103 4701 scope.go:117] "RemoveContainer" containerID="8397e937421223f44758d7ea6a9f4b7466ecd48993bb2759a8a9fa51ef368080" Nov 21 20:15:41 crc kubenswrapper[4701]: E1121 20:15:41.953525 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:15:54 crc kubenswrapper[4701]: I1121 20:15:54.951818 4701 scope.go:117] "RemoveContainer" containerID="8397e937421223f44758d7ea6a9f4b7466ecd48993bb2759a8a9fa51ef368080" Nov 21 20:15:54 crc kubenswrapper[4701]: E1121 20:15:54.953522 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:16:09 crc kubenswrapper[4701]: I1121 20:16:09.967524 4701 scope.go:117] "RemoveContainer" containerID="8397e937421223f44758d7ea6a9f4b7466ecd48993bb2759a8a9fa51ef368080" Nov 21 20:16:09 crc kubenswrapper[4701]: E1121 20:16:09.968862 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:16:23 crc kubenswrapper[4701]: I1121 20:16:23.952186 4701 scope.go:117] "RemoveContainer" containerID="8397e937421223f44758d7ea6a9f4b7466ecd48993bb2759a8a9fa51ef368080" Nov 21 20:16:23 crc kubenswrapper[4701]: E1121 20:16:23.953889 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:16:36 crc kubenswrapper[4701]: I1121 20:16:36.951297 4701 scope.go:117] "RemoveContainer" containerID="8397e937421223f44758d7ea6a9f4b7466ecd48993bb2759a8a9fa51ef368080" Nov 21 20:16:36 crc kubenswrapper[4701]: E1121 20:16:36.952442 4701 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:16:49 crc kubenswrapper[4701]: I1121 20:16:49.958871 4701 scope.go:117] "RemoveContainer" containerID="8397e937421223f44758d7ea6a9f4b7466ecd48993bb2759a8a9fa51ef368080" Nov 21 20:16:49 crc kubenswrapper[4701]: E1121 20:16:49.959716 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:17:02 crc kubenswrapper[4701]: I1121 20:17:02.951484 4701 scope.go:117] "RemoveContainer" containerID="8397e937421223f44758d7ea6a9f4b7466ecd48993bb2759a8a9fa51ef368080" Nov 21 20:17:02 crc kubenswrapper[4701]: E1121 20:17:02.953792 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:17:16 crc kubenswrapper[4701]: I1121 20:17:16.951729 4701 scope.go:117] "RemoveContainer" containerID="8397e937421223f44758d7ea6a9f4b7466ecd48993bb2759a8a9fa51ef368080" Nov 21 20:17:16 crc kubenswrapper[4701]: E1121 20:17:16.953323 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:17:29 crc kubenswrapper[4701]: I1121 20:17:29.972688 4701 scope.go:117] "RemoveContainer" containerID="8397e937421223f44758d7ea6a9f4b7466ecd48993bb2759a8a9fa51ef368080" Nov 21 20:17:29 crc kubenswrapper[4701]: E1121 20:17:29.973974 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:17:42 crc kubenswrapper[4701]: I1121 20:17:42.951713 4701 scope.go:117] "RemoveContainer" containerID="8397e937421223f44758d7ea6a9f4b7466ecd48993bb2759a8a9fa51ef368080" Nov 21 20:17:42 crc kubenswrapper[4701]: E1121 20:17:42.953463 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:17:54 crc kubenswrapper[4701]: I1121 20:17:54.951463 4701 scope.go:117] "RemoveContainer" containerID="8397e937421223f44758d7ea6a9f4b7466ecd48993bb2759a8a9fa51ef368080" Nov 21 20:17:54 crc kubenswrapper[4701]: E1121 20:17:54.952797 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:18:07 crc kubenswrapper[4701]: I1121 20:18:07.951645 4701 scope.go:117] "RemoveContainer" containerID="8397e937421223f44758d7ea6a9f4b7466ecd48993bb2759a8a9fa51ef368080" Nov 21 20:18:07 crc kubenswrapper[4701]: E1121 20:18:07.952888 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:18:18 crc kubenswrapper[4701]: I1121 20:18:18.952160 4701 scope.go:117] "RemoveContainer" containerID="8397e937421223f44758d7ea6a9f4b7466ecd48993bb2759a8a9fa51ef368080" Nov 21 20:18:18 crc kubenswrapper[4701]: E1121 20:18:18.953602 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:18:29 crc kubenswrapper[4701]: I1121 20:18:29.958258 4701 scope.go:117] "RemoveContainer" containerID="8397e937421223f44758d7ea6a9f4b7466ecd48993bb2759a8a9fa51ef368080" Nov 21 20:18:29 crc kubenswrapper[4701]: E1121 20:18:29.959022 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:18:44 crc kubenswrapper[4701]: I1121 20:18:44.951858 4701 scope.go:117] "RemoveContainer" containerID="8397e937421223f44758d7ea6a9f4b7466ecd48993bb2759a8a9fa51ef368080" Nov 21 20:18:44 crc kubenswrapper[4701]: E1121 20:18:44.952751 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" 
podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:18:46 crc kubenswrapper[4701]: E1121 20:18:46.037847 4701 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.12:33538->38.102.83.12:39339: write tcp 38.102.83.12:33538->38.102.83.12:39339: write: broken pipe Nov 21 20:18:55 crc kubenswrapper[4701]: I1121 20:18:55.953189 4701 scope.go:117] "RemoveContainer" containerID="8397e937421223f44758d7ea6a9f4b7466ecd48993bb2759a8a9fa51ef368080" Nov 21 20:18:55 crc kubenswrapper[4701]: E1121 20:18:55.954434 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:19:08 crc kubenswrapper[4701]: I1121 20:19:08.952173 4701 scope.go:117] "RemoveContainer" containerID="8397e937421223f44758d7ea6a9f4b7466ecd48993bb2759a8a9fa51ef368080" Nov 21 20:19:08 crc kubenswrapper[4701]: E1121 20:19:08.953566 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:19:23 crc kubenswrapper[4701]: I1121 20:19:23.952138 4701 scope.go:117] "RemoveContainer" containerID="8397e937421223f44758d7ea6a9f4b7466ecd48993bb2759a8a9fa51ef368080" Nov 21 20:19:23 crc kubenswrapper[4701]: E1121 20:19:23.953230 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:19:35 crc kubenswrapper[4701]: I1121 20:19:35.952242 4701 scope.go:117] "RemoveContainer" containerID="8397e937421223f44758d7ea6a9f4b7466ecd48993bb2759a8a9fa51ef368080" Nov 21 20:19:35 crc kubenswrapper[4701]: E1121 20:19:35.953320 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:19:48 crc kubenswrapper[4701]: I1121 20:19:48.951684 4701 scope.go:117] "RemoveContainer" containerID="8397e937421223f44758d7ea6a9f4b7466ecd48993bb2759a8a9fa51ef368080" Nov 21 20:19:49 crc kubenswrapper[4701]: I1121 20:19:49.754045 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerStarted","Data":"be6a0899714ace0ec892ba2c0def9dffaa232cf3e4bfd85df650721c6e9c5c6e"} Nov 21 20:21:03 crc kubenswrapper[4701]: I1121 
20:21:03.636400 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-27g5z"] Nov 21 20:21:03 crc kubenswrapper[4701]: E1121 20:21:03.637739 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5462eeb-0a00-423c-8409-64a02f589ed0" containerName="extract-content" Nov 21 20:21:03 crc kubenswrapper[4701]: I1121 20:21:03.637757 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5462eeb-0a00-423c-8409-64a02f589ed0" containerName="extract-content" Nov 21 20:21:03 crc kubenswrapper[4701]: E1121 20:21:03.637778 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5462eeb-0a00-423c-8409-64a02f589ed0" containerName="registry-server" Nov 21 20:21:03 crc kubenswrapper[4701]: I1121 20:21:03.637786 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5462eeb-0a00-423c-8409-64a02f589ed0" containerName="registry-server" Nov 21 20:21:03 crc kubenswrapper[4701]: E1121 20:21:03.637805 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55b0dc90-c132-4aab-abcf-a97463032b37" containerName="collect-profiles" Nov 21 20:21:03 crc kubenswrapper[4701]: I1121 20:21:03.637812 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="55b0dc90-c132-4aab-abcf-a97463032b37" containerName="collect-profiles" Nov 21 20:21:03 crc kubenswrapper[4701]: E1121 20:21:03.637833 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5462eeb-0a00-423c-8409-64a02f589ed0" containerName="extract-utilities" Nov 21 20:21:03 crc kubenswrapper[4701]: I1121 20:21:03.637841 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5462eeb-0a00-423c-8409-64a02f589ed0" containerName="extract-utilities" Nov 21 20:21:03 crc kubenswrapper[4701]: I1121 20:21:03.638101 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="55b0dc90-c132-4aab-abcf-a97463032b37" containerName="collect-profiles" Nov 21 20:21:03 crc kubenswrapper[4701]: I1121 20:21:03.638115 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5462eeb-0a00-423c-8409-64a02f589ed0" containerName="registry-server" Nov 21 20:21:03 crc kubenswrapper[4701]: I1121 20:21:03.640070 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-27g5z" Nov 21 20:21:03 crc kubenswrapper[4701]: I1121 20:21:03.659979 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-27g5z"] Nov 21 20:21:03 crc kubenswrapper[4701]: I1121 20:21:03.759593 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4dd225b7-8c02-4c95-ab74-ab7950a495c7-catalog-content\") pod \"redhat-operators-27g5z\" (UID: \"4dd225b7-8c02-4c95-ab74-ab7950a495c7\") " pod="openshift-marketplace/redhat-operators-27g5z" Nov 21 20:21:03 crc kubenswrapper[4701]: I1121 20:21:03.760156 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4dd225b7-8c02-4c95-ab74-ab7950a495c7-utilities\") pod \"redhat-operators-27g5z\" (UID: \"4dd225b7-8c02-4c95-ab74-ab7950a495c7\") " pod="openshift-marketplace/redhat-operators-27g5z" Nov 21 20:21:03 crc kubenswrapper[4701]: I1121 20:21:03.760448 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9tx2f\" (UniqueName: \"kubernetes.io/projected/4dd225b7-8c02-4c95-ab74-ab7950a495c7-kube-api-access-9tx2f\") pod \"redhat-operators-27g5z\" (UID: \"4dd225b7-8c02-4c95-ab74-ab7950a495c7\") " pod="openshift-marketplace/redhat-operators-27g5z" Nov 21 20:21:03 crc kubenswrapper[4701]: I1121 20:21:03.863543 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4dd225b7-8c02-4c95-ab74-ab7950a495c7-utilities\") pod \"redhat-operators-27g5z\" (UID: \"4dd225b7-8c02-4c95-ab74-ab7950a495c7\") " pod="openshift-marketplace/redhat-operators-27g5z" Nov 21 20:21:03 crc kubenswrapper[4701]: I1121 20:21:03.863727 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9tx2f\" (UniqueName: \"kubernetes.io/projected/4dd225b7-8c02-4c95-ab74-ab7950a495c7-kube-api-access-9tx2f\") pod \"redhat-operators-27g5z\" (UID: \"4dd225b7-8c02-4c95-ab74-ab7950a495c7\") " pod="openshift-marketplace/redhat-operators-27g5z" Nov 21 20:21:03 crc kubenswrapper[4701]: I1121 20:21:03.863874 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4dd225b7-8c02-4c95-ab74-ab7950a495c7-catalog-content\") pod \"redhat-operators-27g5z\" (UID: \"4dd225b7-8c02-4c95-ab74-ab7950a495c7\") " pod="openshift-marketplace/redhat-operators-27g5z" Nov 21 20:21:03 crc kubenswrapper[4701]: I1121 20:21:03.864015 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4dd225b7-8c02-4c95-ab74-ab7950a495c7-utilities\") pod \"redhat-operators-27g5z\" (UID: \"4dd225b7-8c02-4c95-ab74-ab7950a495c7\") " pod="openshift-marketplace/redhat-operators-27g5z" Nov 21 20:21:03 crc kubenswrapper[4701]: I1121 20:21:03.864509 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4dd225b7-8c02-4c95-ab74-ab7950a495c7-catalog-content\") pod \"redhat-operators-27g5z\" (UID: \"4dd225b7-8c02-4c95-ab74-ab7950a495c7\") " pod="openshift-marketplace/redhat-operators-27g5z" Nov 21 20:21:03 crc kubenswrapper[4701]: I1121 20:21:03.890512 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-9tx2f\" (UniqueName: \"kubernetes.io/projected/4dd225b7-8c02-4c95-ab74-ab7950a495c7-kube-api-access-9tx2f\") pod \"redhat-operators-27g5z\" (UID: \"4dd225b7-8c02-4c95-ab74-ab7950a495c7\") " pod="openshift-marketplace/redhat-operators-27g5z" Nov 21 20:21:03 crc kubenswrapper[4701]: I1121 20:21:03.996014 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-27g5z" Nov 21 20:21:04 crc kubenswrapper[4701]: I1121 20:21:04.478502 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-27g5z"] Nov 21 20:21:04 crc kubenswrapper[4701]: I1121 20:21:04.854542 4701 generic.go:334] "Generic (PLEG): container finished" podID="4dd225b7-8c02-4c95-ab74-ab7950a495c7" containerID="57606e73bf31711666a1b563313da802e9704063280fe7a50b0d50ee475ba06c" exitCode=0 Nov 21 20:21:04 crc kubenswrapper[4701]: I1121 20:21:04.854662 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-27g5z" event={"ID":"4dd225b7-8c02-4c95-ab74-ab7950a495c7","Type":"ContainerDied","Data":"57606e73bf31711666a1b563313da802e9704063280fe7a50b0d50ee475ba06c"} Nov 21 20:21:04 crc kubenswrapper[4701]: I1121 20:21:04.854867 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-27g5z" event={"ID":"4dd225b7-8c02-4c95-ab74-ab7950a495c7","Type":"ContainerStarted","Data":"a1c0d2a48e73659e2b03c9d33977970f4c42593e5243cbaf2ab562e00e41e152"} Nov 21 20:21:04 crc kubenswrapper[4701]: I1121 20:21:04.856759 4701 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 20:21:05 crc kubenswrapper[4701]: I1121 20:21:05.865018 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-27g5z" event={"ID":"4dd225b7-8c02-4c95-ab74-ab7950a495c7","Type":"ContainerStarted","Data":"1a12f204d530b3360fc37da1552c1130d9230f1303b588d0651c765d3fd41d0c"} Nov 21 20:21:09 crc kubenswrapper[4701]: I1121 20:21:09.916552 4701 generic.go:334] "Generic (PLEG): container finished" podID="4dd225b7-8c02-4c95-ab74-ab7950a495c7" containerID="1a12f204d530b3360fc37da1552c1130d9230f1303b588d0651c765d3fd41d0c" exitCode=0 Nov 21 20:21:09 crc kubenswrapper[4701]: I1121 20:21:09.916651 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-27g5z" event={"ID":"4dd225b7-8c02-4c95-ab74-ab7950a495c7","Type":"ContainerDied","Data":"1a12f204d530b3360fc37da1552c1130d9230f1303b588d0651c765d3fd41d0c"} Nov 21 20:21:10 crc kubenswrapper[4701]: I1121 20:21:10.976369 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-27g5z" event={"ID":"4dd225b7-8c02-4c95-ab74-ab7950a495c7","Type":"ContainerStarted","Data":"3482c9fa12d03743537de75fe47e6016b327789d7c2a6988ac471a820c2010e2"} Nov 21 20:21:11 crc kubenswrapper[4701]: I1121 20:21:11.021316 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-27g5z" podStartSLOduration=2.369261721 podStartE2EDuration="8.02129989s" podCreationTimestamp="2025-11-21 20:21:03 +0000 UTC" firstStartedPulling="2025-11-21 20:21:04.856511401 +0000 UTC m=+4755.641651428" lastFinishedPulling="2025-11-21 20:21:10.50854953 +0000 UTC m=+4761.293689597" observedRunningTime="2025-11-21 20:21:11.017687004 +0000 UTC m=+4761.802827031" watchObservedRunningTime="2025-11-21 20:21:11.02129989 +0000 UTC m=+4761.806439917" Nov 21 20:21:13 crc 
kubenswrapper[4701]: I1121 20:21:13.996179 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-27g5z" Nov 21 20:21:13 crc kubenswrapper[4701]: I1121 20:21:13.996893 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-27g5z" Nov 21 20:21:15 crc kubenswrapper[4701]: I1121 20:21:15.046452 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-27g5z" podUID="4dd225b7-8c02-4c95-ab74-ab7950a495c7" containerName="registry-server" probeResult="failure" output=< Nov 21 20:21:15 crc kubenswrapper[4701]: timeout: failed to connect service ":50051" within 1s Nov 21 20:21:15 crc kubenswrapper[4701]: > Nov 21 20:21:25 crc kubenswrapper[4701]: I1121 20:21:25.066889 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-27g5z" podUID="4dd225b7-8c02-4c95-ab74-ab7950a495c7" containerName="registry-server" probeResult="failure" output=< Nov 21 20:21:25 crc kubenswrapper[4701]: timeout: failed to connect service ":50051" within 1s Nov 21 20:21:25 crc kubenswrapper[4701]: > Nov 21 20:21:35 crc kubenswrapper[4701]: I1121 20:21:35.038501 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-27g5z" podUID="4dd225b7-8c02-4c95-ab74-ab7950a495c7" containerName="registry-server" probeResult="failure" output=< Nov 21 20:21:35 crc kubenswrapper[4701]: timeout: failed to connect service ":50051" within 1s Nov 21 20:21:35 crc kubenswrapper[4701]: > Nov 21 20:21:44 crc kubenswrapper[4701]: I1121 20:21:44.088119 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-27g5z" Nov 21 20:21:44 crc kubenswrapper[4701]: I1121 20:21:44.181515 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-27g5z" Nov 21 20:21:44 crc kubenswrapper[4701]: I1121 20:21:44.343506 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-27g5z"] Nov 21 20:21:45 crc kubenswrapper[4701]: I1121 20:21:45.452892 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-27g5z" podUID="4dd225b7-8c02-4c95-ab74-ab7950a495c7" containerName="registry-server" containerID="cri-o://3482c9fa12d03743537de75fe47e6016b327789d7c2a6988ac471a820c2010e2" gracePeriod=2 Nov 21 20:21:46 crc kubenswrapper[4701]: I1121 20:21:46.131851 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-27g5z" Nov 21 20:21:46 crc kubenswrapper[4701]: I1121 20:21:46.290975 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4dd225b7-8c02-4c95-ab74-ab7950a495c7-utilities\") pod \"4dd225b7-8c02-4c95-ab74-ab7950a495c7\" (UID: \"4dd225b7-8c02-4c95-ab74-ab7950a495c7\") " Nov 21 20:21:46 crc kubenswrapper[4701]: I1121 20:21:46.291130 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9tx2f\" (UniqueName: \"kubernetes.io/projected/4dd225b7-8c02-4c95-ab74-ab7950a495c7-kube-api-access-9tx2f\") pod \"4dd225b7-8c02-4c95-ab74-ab7950a495c7\" (UID: \"4dd225b7-8c02-4c95-ab74-ab7950a495c7\") " Nov 21 20:21:46 crc kubenswrapper[4701]: I1121 20:21:46.291363 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4dd225b7-8c02-4c95-ab74-ab7950a495c7-catalog-content\") pod \"4dd225b7-8c02-4c95-ab74-ab7950a495c7\" (UID: \"4dd225b7-8c02-4c95-ab74-ab7950a495c7\") " Nov 21 20:21:46 crc kubenswrapper[4701]: I1121 20:21:46.292114 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4dd225b7-8c02-4c95-ab74-ab7950a495c7-utilities" (OuterVolumeSpecName: "utilities") pod "4dd225b7-8c02-4c95-ab74-ab7950a495c7" (UID: "4dd225b7-8c02-4c95-ab74-ab7950a495c7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:21:46 crc kubenswrapper[4701]: I1121 20:21:46.302725 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4dd225b7-8c02-4c95-ab74-ab7950a495c7-kube-api-access-9tx2f" (OuterVolumeSpecName: "kube-api-access-9tx2f") pod "4dd225b7-8c02-4c95-ab74-ab7950a495c7" (UID: "4dd225b7-8c02-4c95-ab74-ab7950a495c7"). InnerVolumeSpecName "kube-api-access-9tx2f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 20:21:46 crc kubenswrapper[4701]: I1121 20:21:46.395670 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4dd225b7-8c02-4c95-ab74-ab7950a495c7-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 20:21:46 crc kubenswrapper[4701]: I1121 20:21:46.395733 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9tx2f\" (UniqueName: \"kubernetes.io/projected/4dd225b7-8c02-4c95-ab74-ab7950a495c7-kube-api-access-9tx2f\") on node \"crc\" DevicePath \"\"" Nov 21 20:21:46 crc kubenswrapper[4701]: I1121 20:21:46.421384 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4dd225b7-8c02-4c95-ab74-ab7950a495c7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4dd225b7-8c02-4c95-ab74-ab7950a495c7" (UID: "4dd225b7-8c02-4c95-ab74-ab7950a495c7"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:21:46 crc kubenswrapper[4701]: I1121 20:21:46.468930 4701 generic.go:334] "Generic (PLEG): container finished" podID="4dd225b7-8c02-4c95-ab74-ab7950a495c7" containerID="3482c9fa12d03743537de75fe47e6016b327789d7c2a6988ac471a820c2010e2" exitCode=0 Nov 21 20:21:46 crc kubenswrapper[4701]: I1121 20:21:46.468986 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-27g5z" event={"ID":"4dd225b7-8c02-4c95-ab74-ab7950a495c7","Type":"ContainerDied","Data":"3482c9fa12d03743537de75fe47e6016b327789d7c2a6988ac471a820c2010e2"} Nov 21 20:21:46 crc kubenswrapper[4701]: I1121 20:21:46.469024 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-27g5z" event={"ID":"4dd225b7-8c02-4c95-ab74-ab7950a495c7","Type":"ContainerDied","Data":"a1c0d2a48e73659e2b03c9d33977970f4c42593e5243cbaf2ab562e00e41e152"} Nov 21 20:21:46 crc kubenswrapper[4701]: I1121 20:21:46.469028 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-27g5z" Nov 21 20:21:46 crc kubenswrapper[4701]: I1121 20:21:46.469048 4701 scope.go:117] "RemoveContainer" containerID="3482c9fa12d03743537de75fe47e6016b327789d7c2a6988ac471a820c2010e2" Nov 21 20:21:46 crc kubenswrapper[4701]: I1121 20:21:46.500694 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4dd225b7-8c02-4c95-ab74-ab7950a495c7-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 20:21:46 crc kubenswrapper[4701]: I1121 20:21:46.513790 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-27g5z"] Nov 21 20:21:46 crc kubenswrapper[4701]: I1121 20:21:46.519531 4701 scope.go:117] "RemoveContainer" containerID="1a12f204d530b3360fc37da1552c1130d9230f1303b588d0651c765d3fd41d0c" Nov 21 20:21:46 crc kubenswrapper[4701]: I1121 20:21:46.525819 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-27g5z"] Nov 21 20:21:46 crc kubenswrapper[4701]: I1121 20:21:46.578516 4701 scope.go:117] "RemoveContainer" containerID="57606e73bf31711666a1b563313da802e9704063280fe7a50b0d50ee475ba06c" Nov 21 20:21:46 crc kubenswrapper[4701]: I1121 20:21:46.627081 4701 scope.go:117] "RemoveContainer" containerID="3482c9fa12d03743537de75fe47e6016b327789d7c2a6988ac471a820c2010e2" Nov 21 20:21:46 crc kubenswrapper[4701]: E1121 20:21:46.627886 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3482c9fa12d03743537de75fe47e6016b327789d7c2a6988ac471a820c2010e2\": container with ID starting with 3482c9fa12d03743537de75fe47e6016b327789d7c2a6988ac471a820c2010e2 not found: ID does not exist" containerID="3482c9fa12d03743537de75fe47e6016b327789d7c2a6988ac471a820c2010e2" Nov 21 20:21:46 crc kubenswrapper[4701]: I1121 20:21:46.627958 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3482c9fa12d03743537de75fe47e6016b327789d7c2a6988ac471a820c2010e2"} err="failed to get container status \"3482c9fa12d03743537de75fe47e6016b327789d7c2a6988ac471a820c2010e2\": rpc error: code = NotFound desc = could not find container \"3482c9fa12d03743537de75fe47e6016b327789d7c2a6988ac471a820c2010e2\": container with ID starting with 3482c9fa12d03743537de75fe47e6016b327789d7c2a6988ac471a820c2010e2 not found: ID does not exist" Nov 21 20:21:46 crc 
kubenswrapper[4701]: I1121 20:21:46.628003 4701 scope.go:117] "RemoveContainer" containerID="1a12f204d530b3360fc37da1552c1130d9230f1303b588d0651c765d3fd41d0c" Nov 21 20:21:46 crc kubenswrapper[4701]: E1121 20:21:46.628638 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1a12f204d530b3360fc37da1552c1130d9230f1303b588d0651c765d3fd41d0c\": container with ID starting with 1a12f204d530b3360fc37da1552c1130d9230f1303b588d0651c765d3fd41d0c not found: ID does not exist" containerID="1a12f204d530b3360fc37da1552c1130d9230f1303b588d0651c765d3fd41d0c" Nov 21 20:21:46 crc kubenswrapper[4701]: I1121 20:21:46.628682 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1a12f204d530b3360fc37da1552c1130d9230f1303b588d0651c765d3fd41d0c"} err="failed to get container status \"1a12f204d530b3360fc37da1552c1130d9230f1303b588d0651c765d3fd41d0c\": rpc error: code = NotFound desc = could not find container \"1a12f204d530b3360fc37da1552c1130d9230f1303b588d0651c765d3fd41d0c\": container with ID starting with 1a12f204d530b3360fc37da1552c1130d9230f1303b588d0651c765d3fd41d0c not found: ID does not exist" Nov 21 20:21:46 crc kubenswrapper[4701]: I1121 20:21:46.628713 4701 scope.go:117] "RemoveContainer" containerID="57606e73bf31711666a1b563313da802e9704063280fe7a50b0d50ee475ba06c" Nov 21 20:21:46 crc kubenswrapper[4701]: E1121 20:21:46.629068 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"57606e73bf31711666a1b563313da802e9704063280fe7a50b0d50ee475ba06c\": container with ID starting with 57606e73bf31711666a1b563313da802e9704063280fe7a50b0d50ee475ba06c not found: ID does not exist" containerID="57606e73bf31711666a1b563313da802e9704063280fe7a50b0d50ee475ba06c" Nov 21 20:21:46 crc kubenswrapper[4701]: I1121 20:21:46.629124 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"57606e73bf31711666a1b563313da802e9704063280fe7a50b0d50ee475ba06c"} err="failed to get container status \"57606e73bf31711666a1b563313da802e9704063280fe7a50b0d50ee475ba06c\": rpc error: code = NotFound desc = could not find container \"57606e73bf31711666a1b563313da802e9704063280fe7a50b0d50ee475ba06c\": container with ID starting with 57606e73bf31711666a1b563313da802e9704063280fe7a50b0d50ee475ba06c not found: ID does not exist" Nov 21 20:21:47 crc kubenswrapper[4701]: I1121 20:21:47.972424 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4dd225b7-8c02-4c95-ab74-ab7950a495c7" path="/var/lib/kubelet/pods/4dd225b7-8c02-4c95-ab74-ab7950a495c7/volumes" Nov 21 20:22:18 crc kubenswrapper[4701]: I1121 20:22:18.614046 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 20:22:18 crc kubenswrapper[4701]: I1121 20:22:18.614875 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 20:22:48 crc kubenswrapper[4701]: I1121 20:22:48.613742 4701 patch_prober.go:28] interesting 
pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 20:22:48 crc kubenswrapper[4701]: I1121 20:22:48.615360 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 20:23:18 crc kubenswrapper[4701]: I1121 20:23:18.614344 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 20:23:18 crc kubenswrapper[4701]: I1121 20:23:18.615275 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 20:23:18 crc kubenswrapper[4701]: I1121 20:23:18.615355 4701 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" Nov 21 20:23:18 crc kubenswrapper[4701]: I1121 20:23:18.616355 4701 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"be6a0899714ace0ec892ba2c0def9dffaa232cf3e4bfd85df650721c6e9c5c6e"} pod="openshift-machine-config-operator/machine-config-daemon-tbszf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 20:23:18 crc kubenswrapper[4701]: I1121 20:23:18.616458 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" containerID="cri-o://be6a0899714ace0ec892ba2c0def9dffaa232cf3e4bfd85df650721c6e9c5c6e" gracePeriod=600 Nov 21 20:23:19 crc kubenswrapper[4701]: I1121 20:23:19.163769 4701 generic.go:334] "Generic (PLEG): container finished" podID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerID="be6a0899714ace0ec892ba2c0def9dffaa232cf3e4bfd85df650721c6e9c5c6e" exitCode=0 Nov 21 20:23:19 crc kubenswrapper[4701]: I1121 20:23:19.163870 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerDied","Data":"be6a0899714ace0ec892ba2c0def9dffaa232cf3e4bfd85df650721c6e9c5c6e"} Nov 21 20:23:19 crc kubenswrapper[4701]: I1121 20:23:19.164290 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerStarted","Data":"4f3ba76f10556bd702c86481c6c460b3f01a37c542d060ca61ebacc11459c7ad"} Nov 21 20:23:19 crc kubenswrapper[4701]: I1121 20:23:19.164318 4701 scope.go:117] "RemoveContainer" containerID="8397e937421223f44758d7ea6a9f4b7466ecd48993bb2759a8a9fa51ef368080" Nov 21 20:23:29 
crc kubenswrapper[4701]: I1121 20:23:29.126707 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-sblvj"] Nov 21 20:23:29 crc kubenswrapper[4701]: E1121 20:23:29.129067 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4dd225b7-8c02-4c95-ab74-ab7950a495c7" containerName="extract-utilities" Nov 21 20:23:29 crc kubenswrapper[4701]: I1121 20:23:29.129082 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="4dd225b7-8c02-4c95-ab74-ab7950a495c7" containerName="extract-utilities" Nov 21 20:23:29 crc kubenswrapper[4701]: E1121 20:23:29.129109 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4dd225b7-8c02-4c95-ab74-ab7950a495c7" containerName="extract-content" Nov 21 20:23:29 crc kubenswrapper[4701]: I1121 20:23:29.129115 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="4dd225b7-8c02-4c95-ab74-ab7950a495c7" containerName="extract-content" Nov 21 20:23:29 crc kubenswrapper[4701]: E1121 20:23:29.129135 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4dd225b7-8c02-4c95-ab74-ab7950a495c7" containerName="registry-server" Nov 21 20:23:29 crc kubenswrapper[4701]: I1121 20:23:29.129141 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="4dd225b7-8c02-4c95-ab74-ab7950a495c7" containerName="registry-server" Nov 21 20:23:29 crc kubenswrapper[4701]: I1121 20:23:29.129384 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="4dd225b7-8c02-4c95-ab74-ab7950a495c7" containerName="registry-server" Nov 21 20:23:29 crc kubenswrapper[4701]: I1121 20:23:29.130961 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-sblvj" Nov 21 20:23:29 crc kubenswrapper[4701]: I1121 20:23:29.152844 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-sblvj"] Nov 21 20:23:29 crc kubenswrapper[4701]: I1121 20:23:29.303935 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ttfzl\" (UniqueName: \"kubernetes.io/projected/acad3f06-8482-47c6-bb1b-211035725307-kube-api-access-ttfzl\") pod \"community-operators-sblvj\" (UID: \"acad3f06-8482-47c6-bb1b-211035725307\") " pod="openshift-marketplace/community-operators-sblvj" Nov 21 20:23:29 crc kubenswrapper[4701]: I1121 20:23:29.304166 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/acad3f06-8482-47c6-bb1b-211035725307-utilities\") pod \"community-operators-sblvj\" (UID: \"acad3f06-8482-47c6-bb1b-211035725307\") " pod="openshift-marketplace/community-operators-sblvj" Nov 21 20:23:29 crc kubenswrapper[4701]: I1121 20:23:29.304605 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/acad3f06-8482-47c6-bb1b-211035725307-catalog-content\") pod \"community-operators-sblvj\" (UID: \"acad3f06-8482-47c6-bb1b-211035725307\") " pod="openshift-marketplace/community-operators-sblvj" Nov 21 20:23:29 crc kubenswrapper[4701]: I1121 20:23:29.407417 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/acad3f06-8482-47c6-bb1b-211035725307-catalog-content\") pod \"community-operators-sblvj\" (UID: \"acad3f06-8482-47c6-bb1b-211035725307\") " 
pod="openshift-marketplace/community-operators-sblvj" Nov 21 20:23:29 crc kubenswrapper[4701]: I1121 20:23:29.407538 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ttfzl\" (UniqueName: \"kubernetes.io/projected/acad3f06-8482-47c6-bb1b-211035725307-kube-api-access-ttfzl\") pod \"community-operators-sblvj\" (UID: \"acad3f06-8482-47c6-bb1b-211035725307\") " pod="openshift-marketplace/community-operators-sblvj" Nov 21 20:23:29 crc kubenswrapper[4701]: I1121 20:23:29.407598 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/acad3f06-8482-47c6-bb1b-211035725307-utilities\") pod \"community-operators-sblvj\" (UID: \"acad3f06-8482-47c6-bb1b-211035725307\") " pod="openshift-marketplace/community-operators-sblvj" Nov 21 20:23:29 crc kubenswrapper[4701]: I1121 20:23:29.408108 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/acad3f06-8482-47c6-bb1b-211035725307-utilities\") pod \"community-operators-sblvj\" (UID: \"acad3f06-8482-47c6-bb1b-211035725307\") " pod="openshift-marketplace/community-operators-sblvj" Nov 21 20:23:29 crc kubenswrapper[4701]: I1121 20:23:29.408629 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/acad3f06-8482-47c6-bb1b-211035725307-catalog-content\") pod \"community-operators-sblvj\" (UID: \"acad3f06-8482-47c6-bb1b-211035725307\") " pod="openshift-marketplace/community-operators-sblvj" Nov 21 20:23:29 crc kubenswrapper[4701]: I1121 20:23:29.429286 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ttfzl\" (UniqueName: \"kubernetes.io/projected/acad3f06-8482-47c6-bb1b-211035725307-kube-api-access-ttfzl\") pod \"community-operators-sblvj\" (UID: \"acad3f06-8482-47c6-bb1b-211035725307\") " pod="openshift-marketplace/community-operators-sblvj" Nov 21 20:23:29 crc kubenswrapper[4701]: I1121 20:23:29.463645 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-sblvj" Nov 21 20:23:30 crc kubenswrapper[4701]: I1121 20:23:30.001518 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-sblvj"] Nov 21 20:23:30 crc kubenswrapper[4701]: I1121 20:23:30.298306 4701 generic.go:334] "Generic (PLEG): container finished" podID="acad3f06-8482-47c6-bb1b-211035725307" containerID="13e48de7c25f49bef5c920c13a019e5f56f2ef101f3fe655f174f5c442ee8580" exitCode=0 Nov 21 20:23:30 crc kubenswrapper[4701]: I1121 20:23:30.298455 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sblvj" event={"ID":"acad3f06-8482-47c6-bb1b-211035725307","Type":"ContainerDied","Data":"13e48de7c25f49bef5c920c13a019e5f56f2ef101f3fe655f174f5c442ee8580"} Nov 21 20:23:30 crc kubenswrapper[4701]: I1121 20:23:30.299751 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sblvj" event={"ID":"acad3f06-8482-47c6-bb1b-211035725307","Type":"ContainerStarted","Data":"b005ce508fda91f3aa992564a32cc2a5f914eb6b1037dd246052889dfe33cc0e"} Nov 21 20:23:31 crc kubenswrapper[4701]: I1121 20:23:31.315385 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sblvj" event={"ID":"acad3f06-8482-47c6-bb1b-211035725307","Type":"ContainerStarted","Data":"ef9fe9cfda2b4d9d36901c0e3f256356ac67160fa904b98ec08f89844f5bef90"} Nov 21 20:23:33 crc kubenswrapper[4701]: I1121 20:23:33.355641 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sblvj" event={"ID":"acad3f06-8482-47c6-bb1b-211035725307","Type":"ContainerDied","Data":"ef9fe9cfda2b4d9d36901c0e3f256356ac67160fa904b98ec08f89844f5bef90"} Nov 21 20:23:33 crc kubenswrapper[4701]: I1121 20:23:33.355580 4701 generic.go:334] "Generic (PLEG): container finished" podID="acad3f06-8482-47c6-bb1b-211035725307" containerID="ef9fe9cfda2b4d9d36901c0e3f256356ac67160fa904b98ec08f89844f5bef90" exitCode=0 Nov 21 20:23:34 crc kubenswrapper[4701]: I1121 20:23:34.373461 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sblvj" event={"ID":"acad3f06-8482-47c6-bb1b-211035725307","Type":"ContainerStarted","Data":"24b5d2d7905f8937ee92254b0bf1490baf65e792c68f3724aee8eaff8376e070"} Nov 21 20:23:34 crc kubenswrapper[4701]: I1121 20:23:34.412406 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-sblvj" podStartSLOduration=1.962862736 podStartE2EDuration="5.412367485s" podCreationTimestamp="2025-11-21 20:23:29 +0000 UTC" firstStartedPulling="2025-11-21 20:23:30.301648826 +0000 UTC m=+4901.086788853" lastFinishedPulling="2025-11-21 20:23:33.751153565 +0000 UTC m=+4904.536293602" observedRunningTime="2025-11-21 20:23:34.397754074 +0000 UTC m=+4905.182894131" watchObservedRunningTime="2025-11-21 20:23:34.412367485 +0000 UTC m=+4905.197507552" Nov 21 20:23:39 crc kubenswrapper[4701]: I1121 20:23:39.464341 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-sblvj" Nov 21 20:23:39 crc kubenswrapper[4701]: I1121 20:23:39.465065 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-sblvj" Nov 21 20:23:39 crc kubenswrapper[4701]: I1121 20:23:39.530460 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/community-operators-sblvj" Nov 21 20:23:40 crc kubenswrapper[4701]: I1121 20:23:40.524909 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-sblvj" Nov 21 20:23:40 crc kubenswrapper[4701]: I1121 20:23:40.609166 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-sblvj"] Nov 21 20:23:42 crc kubenswrapper[4701]: I1121 20:23:42.470416 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-sblvj" podUID="acad3f06-8482-47c6-bb1b-211035725307" containerName="registry-server" containerID="cri-o://24b5d2d7905f8937ee92254b0bf1490baf65e792c68f3724aee8eaff8376e070" gracePeriod=2 Nov 21 20:23:42 crc kubenswrapper[4701]: I1121 20:23:42.999329 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-sblvj" Nov 21 20:23:43 crc kubenswrapper[4701]: I1121 20:23:43.069393 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/acad3f06-8482-47c6-bb1b-211035725307-utilities\") pod \"acad3f06-8482-47c6-bb1b-211035725307\" (UID: \"acad3f06-8482-47c6-bb1b-211035725307\") " Nov 21 20:23:43 crc kubenswrapper[4701]: I1121 20:23:43.069585 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ttfzl\" (UniqueName: \"kubernetes.io/projected/acad3f06-8482-47c6-bb1b-211035725307-kube-api-access-ttfzl\") pod \"acad3f06-8482-47c6-bb1b-211035725307\" (UID: \"acad3f06-8482-47c6-bb1b-211035725307\") " Nov 21 20:23:43 crc kubenswrapper[4701]: I1121 20:23:43.069803 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/acad3f06-8482-47c6-bb1b-211035725307-catalog-content\") pod \"acad3f06-8482-47c6-bb1b-211035725307\" (UID: \"acad3f06-8482-47c6-bb1b-211035725307\") " Nov 21 20:23:43 crc kubenswrapper[4701]: I1121 20:23:43.070478 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/acad3f06-8482-47c6-bb1b-211035725307-utilities" (OuterVolumeSpecName: "utilities") pod "acad3f06-8482-47c6-bb1b-211035725307" (UID: "acad3f06-8482-47c6-bb1b-211035725307"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:23:43 crc kubenswrapper[4701]: I1121 20:23:43.071147 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/acad3f06-8482-47c6-bb1b-211035725307-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 20:23:43 crc kubenswrapper[4701]: I1121 20:23:43.085690 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/acad3f06-8482-47c6-bb1b-211035725307-kube-api-access-ttfzl" (OuterVolumeSpecName: "kube-api-access-ttfzl") pod "acad3f06-8482-47c6-bb1b-211035725307" (UID: "acad3f06-8482-47c6-bb1b-211035725307"). InnerVolumeSpecName "kube-api-access-ttfzl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 20:23:43 crc kubenswrapper[4701]: I1121 20:23:43.124065 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/acad3f06-8482-47c6-bb1b-211035725307-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "acad3f06-8482-47c6-bb1b-211035725307" (UID: "acad3f06-8482-47c6-bb1b-211035725307"). 
InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:23:43 crc kubenswrapper[4701]: I1121 20:23:43.174812 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ttfzl\" (UniqueName: \"kubernetes.io/projected/acad3f06-8482-47c6-bb1b-211035725307-kube-api-access-ttfzl\") on node \"crc\" DevicePath \"\"" Nov 21 20:23:43 crc kubenswrapper[4701]: I1121 20:23:43.174858 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/acad3f06-8482-47c6-bb1b-211035725307-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 20:23:43 crc kubenswrapper[4701]: I1121 20:23:43.495606 4701 generic.go:334] "Generic (PLEG): container finished" podID="acad3f06-8482-47c6-bb1b-211035725307" containerID="24b5d2d7905f8937ee92254b0bf1490baf65e792c68f3724aee8eaff8376e070" exitCode=0 Nov 21 20:23:43 crc kubenswrapper[4701]: I1121 20:23:43.495680 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sblvj" event={"ID":"acad3f06-8482-47c6-bb1b-211035725307","Type":"ContainerDied","Data":"24b5d2d7905f8937ee92254b0bf1490baf65e792c68f3724aee8eaff8376e070"} Nov 21 20:23:43 crc kubenswrapper[4701]: I1121 20:23:43.495735 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sblvj" event={"ID":"acad3f06-8482-47c6-bb1b-211035725307","Type":"ContainerDied","Data":"b005ce508fda91f3aa992564a32cc2a5f914eb6b1037dd246052889dfe33cc0e"} Nov 21 20:23:43 crc kubenswrapper[4701]: I1121 20:23:43.495746 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-sblvj" Nov 21 20:23:43 crc kubenswrapper[4701]: I1121 20:23:43.495767 4701 scope.go:117] "RemoveContainer" containerID="24b5d2d7905f8937ee92254b0bf1490baf65e792c68f3724aee8eaff8376e070" Nov 21 20:23:43 crc kubenswrapper[4701]: I1121 20:23:43.565996 4701 scope.go:117] "RemoveContainer" containerID="ef9fe9cfda2b4d9d36901c0e3f256356ac67160fa904b98ec08f89844f5bef90" Nov 21 20:23:43 crc kubenswrapper[4701]: I1121 20:23:43.586843 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-sblvj"] Nov 21 20:23:43 crc kubenswrapper[4701]: I1121 20:23:43.600862 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-sblvj"] Nov 21 20:23:43 crc kubenswrapper[4701]: I1121 20:23:43.629985 4701 scope.go:117] "RemoveContainer" containerID="13e48de7c25f49bef5c920c13a019e5f56f2ef101f3fe655f174f5c442ee8580" Nov 21 20:23:43 crc kubenswrapper[4701]: I1121 20:23:43.668540 4701 scope.go:117] "RemoveContainer" containerID="24b5d2d7905f8937ee92254b0bf1490baf65e792c68f3724aee8eaff8376e070" Nov 21 20:23:43 crc kubenswrapper[4701]: E1121 20:23:43.669407 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"24b5d2d7905f8937ee92254b0bf1490baf65e792c68f3724aee8eaff8376e070\": container with ID starting with 24b5d2d7905f8937ee92254b0bf1490baf65e792c68f3724aee8eaff8376e070 not found: ID does not exist" containerID="24b5d2d7905f8937ee92254b0bf1490baf65e792c68f3724aee8eaff8376e070" Nov 21 20:23:43 crc kubenswrapper[4701]: I1121 20:23:43.669463 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24b5d2d7905f8937ee92254b0bf1490baf65e792c68f3724aee8eaff8376e070"} err="failed to get container status 
\"24b5d2d7905f8937ee92254b0bf1490baf65e792c68f3724aee8eaff8376e070\": rpc error: code = NotFound desc = could not find container \"24b5d2d7905f8937ee92254b0bf1490baf65e792c68f3724aee8eaff8376e070\": container with ID starting with 24b5d2d7905f8937ee92254b0bf1490baf65e792c68f3724aee8eaff8376e070 not found: ID does not exist" Nov 21 20:23:43 crc kubenswrapper[4701]: I1121 20:23:43.669498 4701 scope.go:117] "RemoveContainer" containerID="ef9fe9cfda2b4d9d36901c0e3f256356ac67160fa904b98ec08f89844f5bef90" Nov 21 20:23:43 crc kubenswrapper[4701]: E1121 20:23:43.670100 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ef9fe9cfda2b4d9d36901c0e3f256356ac67160fa904b98ec08f89844f5bef90\": container with ID starting with ef9fe9cfda2b4d9d36901c0e3f256356ac67160fa904b98ec08f89844f5bef90 not found: ID does not exist" containerID="ef9fe9cfda2b4d9d36901c0e3f256356ac67160fa904b98ec08f89844f5bef90" Nov 21 20:23:43 crc kubenswrapper[4701]: I1121 20:23:43.670142 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef9fe9cfda2b4d9d36901c0e3f256356ac67160fa904b98ec08f89844f5bef90"} err="failed to get container status \"ef9fe9cfda2b4d9d36901c0e3f256356ac67160fa904b98ec08f89844f5bef90\": rpc error: code = NotFound desc = could not find container \"ef9fe9cfda2b4d9d36901c0e3f256356ac67160fa904b98ec08f89844f5bef90\": container with ID starting with ef9fe9cfda2b4d9d36901c0e3f256356ac67160fa904b98ec08f89844f5bef90 not found: ID does not exist" Nov 21 20:23:43 crc kubenswrapper[4701]: I1121 20:23:43.670176 4701 scope.go:117] "RemoveContainer" containerID="13e48de7c25f49bef5c920c13a019e5f56f2ef101f3fe655f174f5c442ee8580" Nov 21 20:23:43 crc kubenswrapper[4701]: E1121 20:23:43.670577 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"13e48de7c25f49bef5c920c13a019e5f56f2ef101f3fe655f174f5c442ee8580\": container with ID starting with 13e48de7c25f49bef5c920c13a019e5f56f2ef101f3fe655f174f5c442ee8580 not found: ID does not exist" containerID="13e48de7c25f49bef5c920c13a019e5f56f2ef101f3fe655f174f5c442ee8580" Nov 21 20:23:43 crc kubenswrapper[4701]: I1121 20:23:43.670613 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"13e48de7c25f49bef5c920c13a019e5f56f2ef101f3fe655f174f5c442ee8580"} err="failed to get container status \"13e48de7c25f49bef5c920c13a019e5f56f2ef101f3fe655f174f5c442ee8580\": rpc error: code = NotFound desc = could not find container \"13e48de7c25f49bef5c920c13a019e5f56f2ef101f3fe655f174f5c442ee8580\": container with ID starting with 13e48de7c25f49bef5c920c13a019e5f56f2ef101f3fe655f174f5c442ee8580 not found: ID does not exist" Nov 21 20:23:43 crc kubenswrapper[4701]: I1121 20:23:43.967095 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="acad3f06-8482-47c6-bb1b-211035725307" path="/var/lib/kubelet/pods/acad3f06-8482-47c6-bb1b-211035725307/volumes" Nov 21 20:25:13 crc kubenswrapper[4701]: I1121 20:25:13.622963 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-xp24v"] Nov 21 20:25:13 crc kubenswrapper[4701]: E1121 20:25:13.624435 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="acad3f06-8482-47c6-bb1b-211035725307" containerName="extract-utilities" Nov 21 20:25:13 crc kubenswrapper[4701]: I1121 20:25:13.624452 4701 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="acad3f06-8482-47c6-bb1b-211035725307" containerName="extract-utilities" Nov 21 20:25:13 crc kubenswrapper[4701]: E1121 20:25:13.624493 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="acad3f06-8482-47c6-bb1b-211035725307" containerName="registry-server" Nov 21 20:25:13 crc kubenswrapper[4701]: I1121 20:25:13.624503 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="acad3f06-8482-47c6-bb1b-211035725307" containerName="registry-server" Nov 21 20:25:13 crc kubenswrapper[4701]: E1121 20:25:13.624517 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="acad3f06-8482-47c6-bb1b-211035725307" containerName="extract-content" Nov 21 20:25:13 crc kubenswrapper[4701]: I1121 20:25:13.624526 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="acad3f06-8482-47c6-bb1b-211035725307" containerName="extract-content" Nov 21 20:25:13 crc kubenswrapper[4701]: I1121 20:25:13.624873 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="acad3f06-8482-47c6-bb1b-211035725307" containerName="registry-server" Nov 21 20:25:13 crc kubenswrapper[4701]: I1121 20:25:13.627082 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xp24v" Nov 21 20:25:13 crc kubenswrapper[4701]: I1121 20:25:13.648963 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xp24v"] Nov 21 20:25:13 crc kubenswrapper[4701]: I1121 20:25:13.766013 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b4pvx\" (UniqueName: \"kubernetes.io/projected/062d0927-94b3-4a46-a04a-0ec456e545c0-kube-api-access-b4pvx\") pod \"certified-operators-xp24v\" (UID: \"062d0927-94b3-4a46-a04a-0ec456e545c0\") " pod="openshift-marketplace/certified-operators-xp24v" Nov 21 20:25:13 crc kubenswrapper[4701]: I1121 20:25:13.766357 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/062d0927-94b3-4a46-a04a-0ec456e545c0-utilities\") pod \"certified-operators-xp24v\" (UID: \"062d0927-94b3-4a46-a04a-0ec456e545c0\") " pod="openshift-marketplace/certified-operators-xp24v" Nov 21 20:25:13 crc kubenswrapper[4701]: I1121 20:25:13.766670 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/062d0927-94b3-4a46-a04a-0ec456e545c0-catalog-content\") pod \"certified-operators-xp24v\" (UID: \"062d0927-94b3-4a46-a04a-0ec456e545c0\") " pod="openshift-marketplace/certified-operators-xp24v" Nov 21 20:25:13 crc kubenswrapper[4701]: I1121 20:25:13.869120 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/062d0927-94b3-4a46-a04a-0ec456e545c0-catalog-content\") pod \"certified-operators-xp24v\" (UID: \"062d0927-94b3-4a46-a04a-0ec456e545c0\") " pod="openshift-marketplace/certified-operators-xp24v" Nov 21 20:25:13 crc kubenswrapper[4701]: I1121 20:25:13.869300 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b4pvx\" (UniqueName: \"kubernetes.io/projected/062d0927-94b3-4a46-a04a-0ec456e545c0-kube-api-access-b4pvx\") pod \"certified-operators-xp24v\" (UID: \"062d0927-94b3-4a46-a04a-0ec456e545c0\") " pod="openshift-marketplace/certified-operators-xp24v" Nov 21 20:25:13 crc kubenswrapper[4701]: I1121 
20:25:13.869403 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/062d0927-94b3-4a46-a04a-0ec456e545c0-utilities\") pod \"certified-operators-xp24v\" (UID: \"062d0927-94b3-4a46-a04a-0ec456e545c0\") " pod="openshift-marketplace/certified-operators-xp24v" Nov 21 20:25:13 crc kubenswrapper[4701]: I1121 20:25:13.869981 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/062d0927-94b3-4a46-a04a-0ec456e545c0-utilities\") pod \"certified-operators-xp24v\" (UID: \"062d0927-94b3-4a46-a04a-0ec456e545c0\") " pod="openshift-marketplace/certified-operators-xp24v" Nov 21 20:25:13 crc kubenswrapper[4701]: I1121 20:25:13.870004 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/062d0927-94b3-4a46-a04a-0ec456e545c0-catalog-content\") pod \"certified-operators-xp24v\" (UID: \"062d0927-94b3-4a46-a04a-0ec456e545c0\") " pod="openshift-marketplace/certified-operators-xp24v" Nov 21 20:25:13 crc kubenswrapper[4701]: I1121 20:25:13.934323 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b4pvx\" (UniqueName: \"kubernetes.io/projected/062d0927-94b3-4a46-a04a-0ec456e545c0-kube-api-access-b4pvx\") pod \"certified-operators-xp24v\" (UID: \"062d0927-94b3-4a46-a04a-0ec456e545c0\") " pod="openshift-marketplace/certified-operators-xp24v" Nov 21 20:25:13 crc kubenswrapper[4701]: I1121 20:25:13.989047 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xp24v" Nov 21 20:25:14 crc kubenswrapper[4701]: I1121 20:25:14.523338 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xp24v"] Nov 21 20:25:14 crc kubenswrapper[4701]: I1121 20:25:14.769177 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xp24v" event={"ID":"062d0927-94b3-4a46-a04a-0ec456e545c0","Type":"ContainerStarted","Data":"d5f11351304bf64038a2a70dd0f75396103eebe8fb7b924b33acc209f057b0ba"} Nov 21 20:25:15 crc kubenswrapper[4701]: I1121 20:25:15.783072 4701 generic.go:334] "Generic (PLEG): container finished" podID="062d0927-94b3-4a46-a04a-0ec456e545c0" containerID="41e6e34b7ac23189016e2d3588aa40a551cd2486eef345e4561b4c21255cf132" exitCode=0 Nov 21 20:25:15 crc kubenswrapper[4701]: I1121 20:25:15.783178 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xp24v" event={"ID":"062d0927-94b3-4a46-a04a-0ec456e545c0","Type":"ContainerDied","Data":"41e6e34b7ac23189016e2d3588aa40a551cd2486eef345e4561b4c21255cf132"} Nov 21 20:25:16 crc kubenswrapper[4701]: I1121 20:25:16.814104 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xp24v" event={"ID":"062d0927-94b3-4a46-a04a-0ec456e545c0","Type":"ContainerStarted","Data":"b593cc064de12699ec0263025ffdbcfd7f70866b27a865a74b74ee053cf70d9d"} Nov 21 20:25:17 crc kubenswrapper[4701]: I1121 20:25:17.830898 4701 generic.go:334] "Generic (PLEG): container finished" podID="062d0927-94b3-4a46-a04a-0ec456e545c0" containerID="b593cc064de12699ec0263025ffdbcfd7f70866b27a865a74b74ee053cf70d9d" exitCode=0 Nov 21 20:25:17 crc kubenswrapper[4701]: I1121 20:25:17.830998 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xp24v" 
event={"ID":"062d0927-94b3-4a46-a04a-0ec456e545c0","Type":"ContainerDied","Data":"b593cc064de12699ec0263025ffdbcfd7f70866b27a865a74b74ee053cf70d9d"} Nov 21 20:25:18 crc kubenswrapper[4701]: I1121 20:25:18.613684 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 20:25:18 crc kubenswrapper[4701]: I1121 20:25:18.613780 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 20:25:19 crc kubenswrapper[4701]: I1121 20:25:19.865540 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xp24v" event={"ID":"062d0927-94b3-4a46-a04a-0ec456e545c0","Type":"ContainerStarted","Data":"30ad3d5dbdc47b2d8f7bcac27d3be38a76a3beaad9f07d10222b035519bcce8f"} Nov 21 20:25:19 crc kubenswrapper[4701]: I1121 20:25:19.896659 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-xp24v" podStartSLOduration=4.181906752 podStartE2EDuration="6.896637727s" podCreationTimestamp="2025-11-21 20:25:13 +0000 UTC" firstStartedPulling="2025-11-21 20:25:15.786384991 +0000 UTC m=+5006.571525018" lastFinishedPulling="2025-11-21 20:25:18.501115936 +0000 UTC m=+5009.286255993" observedRunningTime="2025-11-21 20:25:19.889909788 +0000 UTC m=+5010.675049825" watchObservedRunningTime="2025-11-21 20:25:19.896637727 +0000 UTC m=+5010.681777774" Nov 21 20:25:23 crc kubenswrapper[4701]: I1121 20:25:23.990088 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-xp24v" Nov 21 20:25:23 crc kubenswrapper[4701]: I1121 20:25:23.990804 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-xp24v" Nov 21 20:25:24 crc kubenswrapper[4701]: I1121 20:25:24.043162 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-xp24v" Nov 21 20:25:25 crc kubenswrapper[4701]: I1121 20:25:25.013735 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-xp24v" Nov 21 20:25:25 crc kubenswrapper[4701]: I1121 20:25:25.080573 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xp24v"] Nov 21 20:25:26 crc kubenswrapper[4701]: I1121 20:25:26.969177 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-xp24v" podUID="062d0927-94b3-4a46-a04a-0ec456e545c0" containerName="registry-server" containerID="cri-o://30ad3d5dbdc47b2d8f7bcac27d3be38a76a3beaad9f07d10222b035519bcce8f" gracePeriod=2 Nov 21 20:25:27 crc kubenswrapper[4701]: I1121 20:25:27.979733 4701 generic.go:334] "Generic (PLEG): container finished" podID="062d0927-94b3-4a46-a04a-0ec456e545c0" containerID="30ad3d5dbdc47b2d8f7bcac27d3be38a76a3beaad9f07d10222b035519bcce8f" exitCode=0 Nov 21 20:25:27 crc kubenswrapper[4701]: I1121 20:25:27.979820 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-xp24v" event={"ID":"062d0927-94b3-4a46-a04a-0ec456e545c0","Type":"ContainerDied","Data":"30ad3d5dbdc47b2d8f7bcac27d3be38a76a3beaad9f07d10222b035519bcce8f"} Nov 21 20:25:28 crc kubenswrapper[4701]: I1121 20:25:28.120274 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xp24v" Nov 21 20:25:28 crc kubenswrapper[4701]: I1121 20:25:28.253487 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/062d0927-94b3-4a46-a04a-0ec456e545c0-utilities\") pod \"062d0927-94b3-4a46-a04a-0ec456e545c0\" (UID: \"062d0927-94b3-4a46-a04a-0ec456e545c0\") " Nov 21 20:25:28 crc kubenswrapper[4701]: I1121 20:25:28.253971 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b4pvx\" (UniqueName: \"kubernetes.io/projected/062d0927-94b3-4a46-a04a-0ec456e545c0-kube-api-access-b4pvx\") pod \"062d0927-94b3-4a46-a04a-0ec456e545c0\" (UID: \"062d0927-94b3-4a46-a04a-0ec456e545c0\") " Nov 21 20:25:28 crc kubenswrapper[4701]: I1121 20:25:28.254044 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/062d0927-94b3-4a46-a04a-0ec456e545c0-catalog-content\") pod \"062d0927-94b3-4a46-a04a-0ec456e545c0\" (UID: \"062d0927-94b3-4a46-a04a-0ec456e545c0\") " Nov 21 20:25:28 crc kubenswrapper[4701]: I1121 20:25:28.254425 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/062d0927-94b3-4a46-a04a-0ec456e545c0-utilities" (OuterVolumeSpecName: "utilities") pod "062d0927-94b3-4a46-a04a-0ec456e545c0" (UID: "062d0927-94b3-4a46-a04a-0ec456e545c0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:25:28 crc kubenswrapper[4701]: I1121 20:25:28.254765 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/062d0927-94b3-4a46-a04a-0ec456e545c0-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 20:25:28 crc kubenswrapper[4701]: I1121 20:25:28.271613 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/062d0927-94b3-4a46-a04a-0ec456e545c0-kube-api-access-b4pvx" (OuterVolumeSpecName: "kube-api-access-b4pvx") pod "062d0927-94b3-4a46-a04a-0ec456e545c0" (UID: "062d0927-94b3-4a46-a04a-0ec456e545c0"). InnerVolumeSpecName "kube-api-access-b4pvx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 20:25:28 crc kubenswrapper[4701]: I1121 20:25:28.333327 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/062d0927-94b3-4a46-a04a-0ec456e545c0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "062d0927-94b3-4a46-a04a-0ec456e545c0" (UID: "062d0927-94b3-4a46-a04a-0ec456e545c0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:25:28 crc kubenswrapper[4701]: I1121 20:25:28.357275 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b4pvx\" (UniqueName: \"kubernetes.io/projected/062d0927-94b3-4a46-a04a-0ec456e545c0-kube-api-access-b4pvx\") on node \"crc\" DevicePath \"\"" Nov 21 20:25:28 crc kubenswrapper[4701]: I1121 20:25:28.357309 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/062d0927-94b3-4a46-a04a-0ec456e545c0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 20:25:28 crc kubenswrapper[4701]: I1121 20:25:28.995363 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xp24v" event={"ID":"062d0927-94b3-4a46-a04a-0ec456e545c0","Type":"ContainerDied","Data":"d5f11351304bf64038a2a70dd0f75396103eebe8fb7b924b33acc209f057b0ba"} Nov 21 20:25:28 crc kubenswrapper[4701]: I1121 20:25:28.995451 4701 scope.go:117] "RemoveContainer" containerID="30ad3d5dbdc47b2d8f7bcac27d3be38a76a3beaad9f07d10222b035519bcce8f" Nov 21 20:25:28 crc kubenswrapper[4701]: I1121 20:25:28.995462 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xp24v" Nov 21 20:25:29 crc kubenswrapper[4701]: I1121 20:25:29.037805 4701 scope.go:117] "RemoveContainer" containerID="b593cc064de12699ec0263025ffdbcfd7f70866b27a865a74b74ee053cf70d9d" Nov 21 20:25:29 crc kubenswrapper[4701]: I1121 20:25:29.053302 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xp24v"] Nov 21 20:25:29 crc kubenswrapper[4701]: I1121 20:25:29.077955 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-xp24v"] Nov 21 20:25:29 crc kubenswrapper[4701]: I1121 20:25:29.085474 4701 scope.go:117] "RemoveContainer" containerID="41e6e34b7ac23189016e2d3588aa40a551cd2486eef345e4561b4c21255cf132" Nov 21 20:25:29 crc kubenswrapper[4701]: I1121 20:25:29.972925 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="062d0927-94b3-4a46-a04a-0ec456e545c0" path="/var/lib/kubelet/pods/062d0927-94b3-4a46-a04a-0ec456e545c0/volumes" Nov 21 20:25:48 crc kubenswrapper[4701]: I1121 20:25:48.614297 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 20:25:48 crc kubenswrapper[4701]: I1121 20:25:48.615088 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 20:26:18 crc kubenswrapper[4701]: I1121 20:26:18.614107 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 20:26:18 crc kubenswrapper[4701]: I1121 20:26:18.614748 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" 
podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 20:26:18 crc kubenswrapper[4701]: I1121 20:26:18.614823 4701 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" Nov 21 20:26:18 crc kubenswrapper[4701]: I1121 20:26:18.616039 4701 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4f3ba76f10556bd702c86481c6c460b3f01a37c542d060ca61ebacc11459c7ad"} pod="openshift-machine-config-operator/machine-config-daemon-tbszf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 20:26:18 crc kubenswrapper[4701]: I1121 20:26:18.616140 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" containerID="cri-o://4f3ba76f10556bd702c86481c6c460b3f01a37c542d060ca61ebacc11459c7ad" gracePeriod=600 Nov 21 20:26:18 crc kubenswrapper[4701]: E1121 20:26:18.744028 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:26:19 crc kubenswrapper[4701]: I1121 20:26:19.713402 4701 generic.go:334] "Generic (PLEG): container finished" podID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerID="4f3ba76f10556bd702c86481c6c460b3f01a37c542d060ca61ebacc11459c7ad" exitCode=0 Nov 21 20:26:19 crc kubenswrapper[4701]: I1121 20:26:19.713471 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerDied","Data":"4f3ba76f10556bd702c86481c6c460b3f01a37c542d060ca61ebacc11459c7ad"} Nov 21 20:26:19 crc kubenswrapper[4701]: I1121 20:26:19.713516 4701 scope.go:117] "RemoveContainer" containerID="be6a0899714ace0ec892ba2c0def9dffaa232cf3e4bfd85df650721c6e9c5c6e" Nov 21 20:26:19 crc kubenswrapper[4701]: I1121 20:26:19.714663 4701 scope.go:117] "RemoveContainer" containerID="4f3ba76f10556bd702c86481c6c460b3f01a37c542d060ca61ebacc11459c7ad" Nov 21 20:26:19 crc kubenswrapper[4701]: E1121 20:26:19.715513 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:26:34 crc kubenswrapper[4701]: I1121 20:26:34.952239 4701 scope.go:117] "RemoveContainer" containerID="4f3ba76f10556bd702c86481c6c460b3f01a37c542d060ca61ebacc11459c7ad" Nov 21 20:26:34 crc kubenswrapper[4701]: E1121 20:26:34.955413 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:26:49 crc kubenswrapper[4701]: I1121 20:26:49.966506 4701 scope.go:117] "RemoveContainer" containerID="4f3ba76f10556bd702c86481c6c460b3f01a37c542d060ca61ebacc11459c7ad" Nov 21 20:26:49 crc kubenswrapper[4701]: E1121 20:26:49.967924 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:27:01 crc kubenswrapper[4701]: I1121 20:27:01.955194 4701 scope.go:117] "RemoveContainer" containerID="4f3ba76f10556bd702c86481c6c460b3f01a37c542d060ca61ebacc11459c7ad" Nov 21 20:27:01 crc kubenswrapper[4701]: E1121 20:27:01.957239 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:27:15 crc kubenswrapper[4701]: I1121 20:27:15.951747 4701 scope.go:117] "RemoveContainer" containerID="4f3ba76f10556bd702c86481c6c460b3f01a37c542d060ca61ebacc11459c7ad" Nov 21 20:27:15 crc kubenswrapper[4701]: E1121 20:27:15.952980 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:27:26 crc kubenswrapper[4701]: I1121 20:27:26.951823 4701 scope.go:117] "RemoveContainer" containerID="4f3ba76f10556bd702c86481c6c460b3f01a37c542d060ca61ebacc11459c7ad" Nov 21 20:27:26 crc kubenswrapper[4701]: E1121 20:27:26.952781 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:27:37 crc kubenswrapper[4701]: I1121 20:27:37.952139 4701 scope.go:117] "RemoveContainer" containerID="4f3ba76f10556bd702c86481c6c460b3f01a37c542d060ca61ebacc11459c7ad" Nov 21 20:27:37 crc kubenswrapper[4701]: E1121 20:27:37.953564 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:27:51 crc kubenswrapper[4701]: I1121 20:27:51.951348 4701 scope.go:117] "RemoveContainer" containerID="4f3ba76f10556bd702c86481c6c460b3f01a37c542d060ca61ebacc11459c7ad" Nov 21 20:27:51 crc kubenswrapper[4701]: E1121 20:27:51.952439 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:28:02 crc kubenswrapper[4701]: I1121 20:28:02.952047 4701 scope.go:117] "RemoveContainer" containerID="4f3ba76f10556bd702c86481c6c460b3f01a37c542d060ca61ebacc11459c7ad" Nov 21 20:28:02 crc kubenswrapper[4701]: E1121 20:28:02.953102 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:28:10 crc kubenswrapper[4701]: E1121 20:28:10.816372 4701 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.12:58416->38.102.83.12:39339: write tcp 38.102.83.12:58416->38.102.83.12:39339: write: broken pipe Nov 21 20:28:16 crc kubenswrapper[4701]: I1121 20:28:16.951444 4701 scope.go:117] "RemoveContainer" containerID="4f3ba76f10556bd702c86481c6c460b3f01a37c542d060ca61ebacc11459c7ad" Nov 21 20:28:16 crc kubenswrapper[4701]: E1121 20:28:16.952814 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:28:27 crc kubenswrapper[4701]: I1121 20:28:27.951167 4701 scope.go:117] "RemoveContainer" containerID="4f3ba76f10556bd702c86481c6c460b3f01a37c542d060ca61ebacc11459c7ad" Nov 21 20:28:27 crc kubenswrapper[4701]: E1121 20:28:27.951989 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:28:40 crc kubenswrapper[4701]: I1121 20:28:40.952359 4701 scope.go:117] "RemoveContainer" containerID="4f3ba76f10556bd702c86481c6c460b3f01a37c542d060ca61ebacc11459c7ad" Nov 21 20:28:40 crc kubenswrapper[4701]: E1121 20:28:40.953455 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:28:51 crc kubenswrapper[4701]: I1121 20:28:51.951658 4701 scope.go:117] "RemoveContainer" containerID="4f3ba76f10556bd702c86481c6c460b3f01a37c542d060ca61ebacc11459c7ad" Nov 21 20:28:51 crc kubenswrapper[4701]: E1121 20:28:51.955026 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:29:02 crc kubenswrapper[4701]: I1121 20:29:02.952273 4701 scope.go:117] "RemoveContainer" containerID="4f3ba76f10556bd702c86481c6c460b3f01a37c542d060ca61ebacc11459c7ad" Nov 21 20:29:02 crc kubenswrapper[4701]: E1121 20:29:02.953794 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:29:15 crc kubenswrapper[4701]: I1121 20:29:15.584881 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-vxg6j"] Nov 21 20:29:15 crc kubenswrapper[4701]: E1121 20:29:15.585915 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="062d0927-94b3-4a46-a04a-0ec456e545c0" containerName="extract-utilities" Nov 21 20:29:15 crc kubenswrapper[4701]: I1121 20:29:15.585930 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="062d0927-94b3-4a46-a04a-0ec456e545c0" containerName="extract-utilities" Nov 21 20:29:15 crc kubenswrapper[4701]: E1121 20:29:15.585940 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="062d0927-94b3-4a46-a04a-0ec456e545c0" containerName="extract-content" Nov 21 20:29:15 crc kubenswrapper[4701]: I1121 20:29:15.585948 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="062d0927-94b3-4a46-a04a-0ec456e545c0" containerName="extract-content" Nov 21 20:29:15 crc kubenswrapper[4701]: E1121 20:29:15.585963 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="062d0927-94b3-4a46-a04a-0ec456e545c0" containerName="registry-server" Nov 21 20:29:15 crc kubenswrapper[4701]: I1121 20:29:15.585969 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="062d0927-94b3-4a46-a04a-0ec456e545c0" containerName="registry-server" Nov 21 20:29:15 crc kubenswrapper[4701]: I1121 20:29:15.586168 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="062d0927-94b3-4a46-a04a-0ec456e545c0" containerName="registry-server" Nov 21 20:29:15 crc kubenswrapper[4701]: I1121 20:29:15.587643 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vxg6j" Nov 21 20:29:15 crc kubenswrapper[4701]: I1121 20:29:15.608480 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vxg6j"] Nov 21 20:29:15 crc kubenswrapper[4701]: I1121 20:29:15.719473 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5lzmk\" (UniqueName: \"kubernetes.io/projected/f4ffff03-aead-4145-a20f-bd316ef59456-kube-api-access-5lzmk\") pod \"redhat-marketplace-vxg6j\" (UID: \"f4ffff03-aead-4145-a20f-bd316ef59456\") " pod="openshift-marketplace/redhat-marketplace-vxg6j" Nov 21 20:29:15 crc kubenswrapper[4701]: I1121 20:29:15.719660 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4ffff03-aead-4145-a20f-bd316ef59456-catalog-content\") pod \"redhat-marketplace-vxg6j\" (UID: \"f4ffff03-aead-4145-a20f-bd316ef59456\") " pod="openshift-marketplace/redhat-marketplace-vxg6j" Nov 21 20:29:15 crc kubenswrapper[4701]: I1121 20:29:15.719702 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4ffff03-aead-4145-a20f-bd316ef59456-utilities\") pod \"redhat-marketplace-vxg6j\" (UID: \"f4ffff03-aead-4145-a20f-bd316ef59456\") " pod="openshift-marketplace/redhat-marketplace-vxg6j" Nov 21 20:29:15 crc kubenswrapper[4701]: I1121 20:29:15.821592 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4ffff03-aead-4145-a20f-bd316ef59456-catalog-content\") pod \"redhat-marketplace-vxg6j\" (UID: \"f4ffff03-aead-4145-a20f-bd316ef59456\") " pod="openshift-marketplace/redhat-marketplace-vxg6j" Nov 21 20:29:15 crc kubenswrapper[4701]: I1121 20:29:15.821650 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4ffff03-aead-4145-a20f-bd316ef59456-utilities\") pod \"redhat-marketplace-vxg6j\" (UID: \"f4ffff03-aead-4145-a20f-bd316ef59456\") " pod="openshift-marketplace/redhat-marketplace-vxg6j" Nov 21 20:29:15 crc kubenswrapper[4701]: I1121 20:29:15.821777 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5lzmk\" (UniqueName: \"kubernetes.io/projected/f4ffff03-aead-4145-a20f-bd316ef59456-kube-api-access-5lzmk\") pod \"redhat-marketplace-vxg6j\" (UID: \"f4ffff03-aead-4145-a20f-bd316ef59456\") " pod="openshift-marketplace/redhat-marketplace-vxg6j" Nov 21 20:29:15 crc kubenswrapper[4701]: I1121 20:29:15.822416 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4ffff03-aead-4145-a20f-bd316ef59456-catalog-content\") pod \"redhat-marketplace-vxg6j\" (UID: \"f4ffff03-aead-4145-a20f-bd316ef59456\") " pod="openshift-marketplace/redhat-marketplace-vxg6j" Nov 21 20:29:15 crc kubenswrapper[4701]: I1121 20:29:15.822517 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4ffff03-aead-4145-a20f-bd316ef59456-utilities\") pod \"redhat-marketplace-vxg6j\" (UID: \"f4ffff03-aead-4145-a20f-bd316ef59456\") " pod="openshift-marketplace/redhat-marketplace-vxg6j" Nov 21 20:29:15 crc kubenswrapper[4701]: I1121 20:29:15.843003 4701 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-5lzmk\" (UniqueName: \"kubernetes.io/projected/f4ffff03-aead-4145-a20f-bd316ef59456-kube-api-access-5lzmk\") pod \"redhat-marketplace-vxg6j\" (UID: \"f4ffff03-aead-4145-a20f-bd316ef59456\") " pod="openshift-marketplace/redhat-marketplace-vxg6j" Nov 21 20:29:15 crc kubenswrapper[4701]: I1121 20:29:15.937824 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vxg6j" Nov 21 20:29:16 crc kubenswrapper[4701]: I1121 20:29:16.518896 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vxg6j"] Nov 21 20:29:16 crc kubenswrapper[4701]: I1121 20:29:16.953623 4701 scope.go:117] "RemoveContainer" containerID="4f3ba76f10556bd702c86481c6c460b3f01a37c542d060ca61ebacc11459c7ad" Nov 21 20:29:16 crc kubenswrapper[4701]: E1121 20:29:16.954561 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:29:17 crc kubenswrapper[4701]: I1121 20:29:17.089390 4701 generic.go:334] "Generic (PLEG): container finished" podID="f4ffff03-aead-4145-a20f-bd316ef59456" containerID="8ea28a8219054e2ef61f34fb03f50c2f593d97f2a246a7f6d2835cf8b3dc71dc" exitCode=0 Nov 21 20:29:17 crc kubenswrapper[4701]: I1121 20:29:17.089449 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vxg6j" event={"ID":"f4ffff03-aead-4145-a20f-bd316ef59456","Type":"ContainerDied","Data":"8ea28a8219054e2ef61f34fb03f50c2f593d97f2a246a7f6d2835cf8b3dc71dc"} Nov 21 20:29:17 crc kubenswrapper[4701]: I1121 20:29:17.089532 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vxg6j" event={"ID":"f4ffff03-aead-4145-a20f-bd316ef59456","Type":"ContainerStarted","Data":"f877641c5b8ac99fc73c979ca06a2b4fac5de9388752f60a2073ebec8c359a50"} Nov 21 20:29:17 crc kubenswrapper[4701]: I1121 20:29:17.093314 4701 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 20:29:18 crc kubenswrapper[4701]: I1121 20:29:18.111970 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vxg6j" event={"ID":"f4ffff03-aead-4145-a20f-bd316ef59456","Type":"ContainerStarted","Data":"9620ffa1c04e428daad82ef7e8d37f4c9c5fe19003517984c287f377ea313bae"} Nov 21 20:29:19 crc kubenswrapper[4701]: I1121 20:29:19.157389 4701 generic.go:334] "Generic (PLEG): container finished" podID="f4ffff03-aead-4145-a20f-bd316ef59456" containerID="9620ffa1c04e428daad82ef7e8d37f4c9c5fe19003517984c287f377ea313bae" exitCode=0 Nov 21 20:29:19 crc kubenswrapper[4701]: I1121 20:29:19.157488 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vxg6j" event={"ID":"f4ffff03-aead-4145-a20f-bd316ef59456","Type":"ContainerDied","Data":"9620ffa1c04e428daad82ef7e8d37f4c9c5fe19003517984c287f377ea313bae"} Nov 21 20:29:20 crc kubenswrapper[4701]: I1121 20:29:20.171515 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vxg6j" 
event={"ID":"f4ffff03-aead-4145-a20f-bd316ef59456","Type":"ContainerStarted","Data":"1f400b439c1bdcb820016bae141050aaf26b640c08cda379598f9f144297cb1a"} Nov 21 20:29:20 crc kubenswrapper[4701]: I1121 20:29:20.204711 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-vxg6j" podStartSLOduration=2.7643462210000003 podStartE2EDuration="5.20469185s" podCreationTimestamp="2025-11-21 20:29:15 +0000 UTC" firstStartedPulling="2025-11-21 20:29:17.092596133 +0000 UTC m=+5247.877736200" lastFinishedPulling="2025-11-21 20:29:19.532941772 +0000 UTC m=+5250.318081829" observedRunningTime="2025-11-21 20:29:20.197096277 +0000 UTC m=+5250.982236314" watchObservedRunningTime="2025-11-21 20:29:20.20469185 +0000 UTC m=+5250.989831877" Nov 21 20:29:25 crc kubenswrapper[4701]: I1121 20:29:25.938967 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-vxg6j" Nov 21 20:29:25 crc kubenswrapper[4701]: I1121 20:29:25.939884 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-vxg6j" Nov 21 20:29:25 crc kubenswrapper[4701]: I1121 20:29:25.997906 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-vxg6j" Nov 21 20:29:27 crc kubenswrapper[4701]: I1121 20:29:27.223908 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-vxg6j" Nov 21 20:29:27 crc kubenswrapper[4701]: I1121 20:29:27.367520 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vxg6j"] Nov 21 20:29:27 crc kubenswrapper[4701]: I1121 20:29:27.951784 4701 scope.go:117] "RemoveContainer" containerID="4f3ba76f10556bd702c86481c6c460b3f01a37c542d060ca61ebacc11459c7ad" Nov 21 20:29:27 crc kubenswrapper[4701]: E1121 20:29:27.952978 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:29:28 crc kubenswrapper[4701]: I1121 20:29:28.564194 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-vxg6j" podUID="f4ffff03-aead-4145-a20f-bd316ef59456" containerName="registry-server" containerID="cri-o://1f400b439c1bdcb820016bae141050aaf26b640c08cda379598f9f144297cb1a" gracePeriod=2 Nov 21 20:29:29 crc kubenswrapper[4701]: I1121 20:29:29.197745 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vxg6j" Nov 21 20:29:29 crc kubenswrapper[4701]: I1121 20:29:29.307890 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4ffff03-aead-4145-a20f-bd316ef59456-utilities\") pod \"f4ffff03-aead-4145-a20f-bd316ef59456\" (UID: \"f4ffff03-aead-4145-a20f-bd316ef59456\") " Nov 21 20:29:29 crc kubenswrapper[4701]: I1121 20:29:29.308124 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4ffff03-aead-4145-a20f-bd316ef59456-catalog-content\") pod \"f4ffff03-aead-4145-a20f-bd316ef59456\" (UID: \"f4ffff03-aead-4145-a20f-bd316ef59456\") " Nov 21 20:29:29 crc kubenswrapper[4701]: I1121 20:29:29.308299 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5lzmk\" (UniqueName: \"kubernetes.io/projected/f4ffff03-aead-4145-a20f-bd316ef59456-kube-api-access-5lzmk\") pod \"f4ffff03-aead-4145-a20f-bd316ef59456\" (UID: \"f4ffff03-aead-4145-a20f-bd316ef59456\") " Nov 21 20:29:29 crc kubenswrapper[4701]: I1121 20:29:29.310914 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f4ffff03-aead-4145-a20f-bd316ef59456-utilities" (OuterVolumeSpecName: "utilities") pod "f4ffff03-aead-4145-a20f-bd316ef59456" (UID: "f4ffff03-aead-4145-a20f-bd316ef59456"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:29:29 crc kubenswrapper[4701]: I1121 20:29:29.319010 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4ffff03-aead-4145-a20f-bd316ef59456-kube-api-access-5lzmk" (OuterVolumeSpecName: "kube-api-access-5lzmk") pod "f4ffff03-aead-4145-a20f-bd316ef59456" (UID: "f4ffff03-aead-4145-a20f-bd316ef59456"). InnerVolumeSpecName "kube-api-access-5lzmk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 20:29:29 crc kubenswrapper[4701]: I1121 20:29:29.342716 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f4ffff03-aead-4145-a20f-bd316ef59456-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f4ffff03-aead-4145-a20f-bd316ef59456" (UID: "f4ffff03-aead-4145-a20f-bd316ef59456"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:29:29 crc kubenswrapper[4701]: I1121 20:29:29.410975 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4ffff03-aead-4145-a20f-bd316ef59456-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 20:29:29 crc kubenswrapper[4701]: I1121 20:29:29.411027 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5lzmk\" (UniqueName: \"kubernetes.io/projected/f4ffff03-aead-4145-a20f-bd316ef59456-kube-api-access-5lzmk\") on node \"crc\" DevicePath \"\"" Nov 21 20:29:29 crc kubenswrapper[4701]: I1121 20:29:29.411047 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4ffff03-aead-4145-a20f-bd316ef59456-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 20:29:29 crc kubenswrapper[4701]: I1121 20:29:29.581526 4701 generic.go:334] "Generic (PLEG): container finished" podID="f4ffff03-aead-4145-a20f-bd316ef59456" containerID="1f400b439c1bdcb820016bae141050aaf26b640c08cda379598f9f144297cb1a" exitCode=0 Nov 21 20:29:29 crc kubenswrapper[4701]: I1121 20:29:29.581601 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vxg6j" event={"ID":"f4ffff03-aead-4145-a20f-bd316ef59456","Type":"ContainerDied","Data":"1f400b439c1bdcb820016bae141050aaf26b640c08cda379598f9f144297cb1a"} Nov 21 20:29:29 crc kubenswrapper[4701]: I1121 20:29:29.581625 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vxg6j" Nov 21 20:29:29 crc kubenswrapper[4701]: I1121 20:29:29.581646 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vxg6j" event={"ID":"f4ffff03-aead-4145-a20f-bd316ef59456","Type":"ContainerDied","Data":"f877641c5b8ac99fc73c979ca06a2b4fac5de9388752f60a2073ebec8c359a50"} Nov 21 20:29:29 crc kubenswrapper[4701]: I1121 20:29:29.581679 4701 scope.go:117] "RemoveContainer" containerID="1f400b439c1bdcb820016bae141050aaf26b640c08cda379598f9f144297cb1a" Nov 21 20:29:29 crc kubenswrapper[4701]: I1121 20:29:29.622195 4701 scope.go:117] "RemoveContainer" containerID="9620ffa1c04e428daad82ef7e8d37f4c9c5fe19003517984c287f377ea313bae" Nov 21 20:29:29 crc kubenswrapper[4701]: I1121 20:29:29.648617 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vxg6j"] Nov 21 20:29:29 crc kubenswrapper[4701]: I1121 20:29:29.664337 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-vxg6j"] Nov 21 20:29:29 crc kubenswrapper[4701]: I1121 20:29:29.666534 4701 scope.go:117] "RemoveContainer" containerID="8ea28a8219054e2ef61f34fb03f50c2f593d97f2a246a7f6d2835cf8b3dc71dc" Nov 21 20:29:29 crc kubenswrapper[4701]: I1121 20:29:29.720579 4701 scope.go:117] "RemoveContainer" containerID="1f400b439c1bdcb820016bae141050aaf26b640c08cda379598f9f144297cb1a" Nov 21 20:29:29 crc kubenswrapper[4701]: E1121 20:29:29.730077 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1f400b439c1bdcb820016bae141050aaf26b640c08cda379598f9f144297cb1a\": container with ID starting with 1f400b439c1bdcb820016bae141050aaf26b640c08cda379598f9f144297cb1a not found: ID does not exist" containerID="1f400b439c1bdcb820016bae141050aaf26b640c08cda379598f9f144297cb1a" Nov 21 20:29:29 crc kubenswrapper[4701]: I1121 20:29:29.730172 4701 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1f400b439c1bdcb820016bae141050aaf26b640c08cda379598f9f144297cb1a"} err="failed to get container status \"1f400b439c1bdcb820016bae141050aaf26b640c08cda379598f9f144297cb1a\": rpc error: code = NotFound desc = could not find container \"1f400b439c1bdcb820016bae141050aaf26b640c08cda379598f9f144297cb1a\": container with ID starting with 1f400b439c1bdcb820016bae141050aaf26b640c08cda379598f9f144297cb1a not found: ID does not exist" Nov 21 20:29:29 crc kubenswrapper[4701]: I1121 20:29:29.730254 4701 scope.go:117] "RemoveContainer" containerID="9620ffa1c04e428daad82ef7e8d37f4c9c5fe19003517984c287f377ea313bae" Nov 21 20:29:29 crc kubenswrapper[4701]: E1121 20:29:29.731172 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9620ffa1c04e428daad82ef7e8d37f4c9c5fe19003517984c287f377ea313bae\": container with ID starting with 9620ffa1c04e428daad82ef7e8d37f4c9c5fe19003517984c287f377ea313bae not found: ID does not exist" containerID="9620ffa1c04e428daad82ef7e8d37f4c9c5fe19003517984c287f377ea313bae" Nov 21 20:29:29 crc kubenswrapper[4701]: I1121 20:29:29.731248 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9620ffa1c04e428daad82ef7e8d37f4c9c5fe19003517984c287f377ea313bae"} err="failed to get container status \"9620ffa1c04e428daad82ef7e8d37f4c9c5fe19003517984c287f377ea313bae\": rpc error: code = NotFound desc = could not find container \"9620ffa1c04e428daad82ef7e8d37f4c9c5fe19003517984c287f377ea313bae\": container with ID starting with 9620ffa1c04e428daad82ef7e8d37f4c9c5fe19003517984c287f377ea313bae not found: ID does not exist" Nov 21 20:29:29 crc kubenswrapper[4701]: I1121 20:29:29.731284 4701 scope.go:117] "RemoveContainer" containerID="8ea28a8219054e2ef61f34fb03f50c2f593d97f2a246a7f6d2835cf8b3dc71dc" Nov 21 20:29:29 crc kubenswrapper[4701]: E1121 20:29:29.731737 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8ea28a8219054e2ef61f34fb03f50c2f593d97f2a246a7f6d2835cf8b3dc71dc\": container with ID starting with 8ea28a8219054e2ef61f34fb03f50c2f593d97f2a246a7f6d2835cf8b3dc71dc not found: ID does not exist" containerID="8ea28a8219054e2ef61f34fb03f50c2f593d97f2a246a7f6d2835cf8b3dc71dc" Nov 21 20:29:29 crc kubenswrapper[4701]: I1121 20:29:29.731915 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8ea28a8219054e2ef61f34fb03f50c2f593d97f2a246a7f6d2835cf8b3dc71dc"} err="failed to get container status \"8ea28a8219054e2ef61f34fb03f50c2f593d97f2a246a7f6d2835cf8b3dc71dc\": rpc error: code = NotFound desc = could not find container \"8ea28a8219054e2ef61f34fb03f50c2f593d97f2a246a7f6d2835cf8b3dc71dc\": container with ID starting with 8ea28a8219054e2ef61f34fb03f50c2f593d97f2a246a7f6d2835cf8b3dc71dc not found: ID does not exist" Nov 21 20:29:29 crc kubenswrapper[4701]: I1121 20:29:29.967070 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4ffff03-aead-4145-a20f-bd316ef59456" path="/var/lib/kubelet/pods/f4ffff03-aead-4145-a20f-bd316ef59456/volumes" Nov 21 20:29:40 crc kubenswrapper[4701]: I1121 20:29:40.951323 4701 scope.go:117] "RemoveContainer" containerID="4f3ba76f10556bd702c86481c6c460b3f01a37c542d060ca61ebacc11459c7ad" Nov 21 20:29:40 crc kubenswrapper[4701]: E1121 20:29:40.952347 4701 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:29:55 crc kubenswrapper[4701]: I1121 20:29:55.952796 4701 scope.go:117] "RemoveContainer" containerID="4f3ba76f10556bd702c86481c6c460b3f01a37c542d060ca61ebacc11459c7ad" Nov 21 20:29:55 crc kubenswrapper[4701]: E1121 20:29:55.953870 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:30:00 crc kubenswrapper[4701]: I1121 20:30:00.183849 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395950-prkc6"] Nov 21 20:30:00 crc kubenswrapper[4701]: E1121 20:30:00.185548 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4ffff03-aead-4145-a20f-bd316ef59456" containerName="extract-content" Nov 21 20:30:00 crc kubenswrapper[4701]: I1121 20:30:00.185587 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4ffff03-aead-4145-a20f-bd316ef59456" containerName="extract-content" Nov 21 20:30:00 crc kubenswrapper[4701]: E1121 20:30:00.185626 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4ffff03-aead-4145-a20f-bd316ef59456" containerName="registry-server" Nov 21 20:30:00 crc kubenswrapper[4701]: I1121 20:30:00.185646 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4ffff03-aead-4145-a20f-bd316ef59456" containerName="registry-server" Nov 21 20:30:00 crc kubenswrapper[4701]: E1121 20:30:00.185734 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4ffff03-aead-4145-a20f-bd316ef59456" containerName="extract-utilities" Nov 21 20:30:00 crc kubenswrapper[4701]: I1121 20:30:00.185754 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4ffff03-aead-4145-a20f-bd316ef59456" containerName="extract-utilities" Nov 21 20:30:00 crc kubenswrapper[4701]: I1121 20:30:00.186192 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4ffff03-aead-4145-a20f-bd316ef59456" containerName="registry-server" Nov 21 20:30:00 crc kubenswrapper[4701]: I1121 20:30:00.187526 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395950-prkc6" Nov 21 20:30:00 crc kubenswrapper[4701]: I1121 20:30:00.191151 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 21 20:30:00 crc kubenswrapper[4701]: I1121 20:30:00.191948 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 21 20:30:00 crc kubenswrapper[4701]: I1121 20:30:00.208841 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395950-prkc6"] Nov 21 20:30:00 crc kubenswrapper[4701]: I1121 20:30:00.316065 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kpr5v\" (UniqueName: \"kubernetes.io/projected/cec9cb37-9b62-40a7-95d5-7725a39f64a3-kube-api-access-kpr5v\") pod \"collect-profiles-29395950-prkc6\" (UID: \"cec9cb37-9b62-40a7-95d5-7725a39f64a3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395950-prkc6" Nov 21 20:30:00 crc kubenswrapper[4701]: I1121 20:30:00.316509 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cec9cb37-9b62-40a7-95d5-7725a39f64a3-config-volume\") pod \"collect-profiles-29395950-prkc6\" (UID: \"cec9cb37-9b62-40a7-95d5-7725a39f64a3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395950-prkc6" Nov 21 20:30:00 crc kubenswrapper[4701]: I1121 20:30:00.316636 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cec9cb37-9b62-40a7-95d5-7725a39f64a3-secret-volume\") pod \"collect-profiles-29395950-prkc6\" (UID: \"cec9cb37-9b62-40a7-95d5-7725a39f64a3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395950-prkc6" Nov 21 20:30:00 crc kubenswrapper[4701]: I1121 20:30:00.418452 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cec9cb37-9b62-40a7-95d5-7725a39f64a3-config-volume\") pod \"collect-profiles-29395950-prkc6\" (UID: \"cec9cb37-9b62-40a7-95d5-7725a39f64a3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395950-prkc6" Nov 21 20:30:00 crc kubenswrapper[4701]: I1121 20:30:00.418643 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cec9cb37-9b62-40a7-95d5-7725a39f64a3-secret-volume\") pod \"collect-profiles-29395950-prkc6\" (UID: \"cec9cb37-9b62-40a7-95d5-7725a39f64a3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395950-prkc6" Nov 21 20:30:00 crc kubenswrapper[4701]: I1121 20:30:00.418699 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kpr5v\" (UniqueName: \"kubernetes.io/projected/cec9cb37-9b62-40a7-95d5-7725a39f64a3-kube-api-access-kpr5v\") pod \"collect-profiles-29395950-prkc6\" (UID: \"cec9cb37-9b62-40a7-95d5-7725a39f64a3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395950-prkc6" Nov 21 20:30:00 crc kubenswrapper[4701]: I1121 20:30:00.420339 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cec9cb37-9b62-40a7-95d5-7725a39f64a3-config-volume\") pod 
\"collect-profiles-29395950-prkc6\" (UID: \"cec9cb37-9b62-40a7-95d5-7725a39f64a3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395950-prkc6" Nov 21 20:30:00 crc kubenswrapper[4701]: I1121 20:30:00.429719 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cec9cb37-9b62-40a7-95d5-7725a39f64a3-secret-volume\") pod \"collect-profiles-29395950-prkc6\" (UID: \"cec9cb37-9b62-40a7-95d5-7725a39f64a3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395950-prkc6" Nov 21 20:30:00 crc kubenswrapper[4701]: I1121 20:30:00.450878 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kpr5v\" (UniqueName: \"kubernetes.io/projected/cec9cb37-9b62-40a7-95d5-7725a39f64a3-kube-api-access-kpr5v\") pod \"collect-profiles-29395950-prkc6\" (UID: \"cec9cb37-9b62-40a7-95d5-7725a39f64a3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395950-prkc6" Nov 21 20:30:00 crc kubenswrapper[4701]: I1121 20:30:00.551849 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395950-prkc6" Nov 21 20:30:01 crc kubenswrapper[4701]: I1121 20:30:01.052539 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395950-prkc6"] Nov 21 20:30:01 crc kubenswrapper[4701]: W1121 20:30:01.065146 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcec9cb37_9b62_40a7_95d5_7725a39f64a3.slice/crio-910a70737a391fbcbb2cb7b62fe722e6717e5ad45a01acc6ef28de45a8c04a5e WatchSource:0}: Error finding container 910a70737a391fbcbb2cb7b62fe722e6717e5ad45a01acc6ef28de45a8c04a5e: Status 404 returned error can't find the container with id 910a70737a391fbcbb2cb7b62fe722e6717e5ad45a01acc6ef28de45a8c04a5e Nov 21 20:30:01 crc kubenswrapper[4701]: I1121 20:30:01.091269 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395950-prkc6" event={"ID":"cec9cb37-9b62-40a7-95d5-7725a39f64a3","Type":"ContainerStarted","Data":"910a70737a391fbcbb2cb7b62fe722e6717e5ad45a01acc6ef28de45a8c04a5e"} Nov 21 20:30:02 crc kubenswrapper[4701]: I1121 20:30:02.107241 4701 generic.go:334] "Generic (PLEG): container finished" podID="cec9cb37-9b62-40a7-95d5-7725a39f64a3" containerID="badb193c9e1f89ba7ad14372aa3a9fe02c01bb298f0b40dced89ba2b2ce97b52" exitCode=0 Nov 21 20:30:02 crc kubenswrapper[4701]: I1121 20:30:02.107360 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395950-prkc6" event={"ID":"cec9cb37-9b62-40a7-95d5-7725a39f64a3","Type":"ContainerDied","Data":"badb193c9e1f89ba7ad14372aa3a9fe02c01bb298f0b40dced89ba2b2ce97b52"} Nov 21 20:30:03 crc kubenswrapper[4701]: I1121 20:30:03.561990 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395950-prkc6" Nov 21 20:30:03 crc kubenswrapper[4701]: I1121 20:30:03.705490 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cec9cb37-9b62-40a7-95d5-7725a39f64a3-secret-volume\") pod \"cec9cb37-9b62-40a7-95d5-7725a39f64a3\" (UID: \"cec9cb37-9b62-40a7-95d5-7725a39f64a3\") " Nov 21 20:30:03 crc kubenswrapper[4701]: I1121 20:30:03.705610 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cec9cb37-9b62-40a7-95d5-7725a39f64a3-config-volume\") pod \"cec9cb37-9b62-40a7-95d5-7725a39f64a3\" (UID: \"cec9cb37-9b62-40a7-95d5-7725a39f64a3\") " Nov 21 20:30:03 crc kubenswrapper[4701]: I1121 20:30:03.705738 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kpr5v\" (UniqueName: \"kubernetes.io/projected/cec9cb37-9b62-40a7-95d5-7725a39f64a3-kube-api-access-kpr5v\") pod \"cec9cb37-9b62-40a7-95d5-7725a39f64a3\" (UID: \"cec9cb37-9b62-40a7-95d5-7725a39f64a3\") " Nov 21 20:30:03 crc kubenswrapper[4701]: I1121 20:30:03.706545 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cec9cb37-9b62-40a7-95d5-7725a39f64a3-config-volume" (OuterVolumeSpecName: "config-volume") pod "cec9cb37-9b62-40a7-95d5-7725a39f64a3" (UID: "cec9cb37-9b62-40a7-95d5-7725a39f64a3"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 20:30:03 crc kubenswrapper[4701]: I1121 20:30:03.715316 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cec9cb37-9b62-40a7-95d5-7725a39f64a3-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "cec9cb37-9b62-40a7-95d5-7725a39f64a3" (UID: "cec9cb37-9b62-40a7-95d5-7725a39f64a3"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 20:30:03 crc kubenswrapper[4701]: I1121 20:30:03.722187 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cec9cb37-9b62-40a7-95d5-7725a39f64a3-kube-api-access-kpr5v" (OuterVolumeSpecName: "kube-api-access-kpr5v") pod "cec9cb37-9b62-40a7-95d5-7725a39f64a3" (UID: "cec9cb37-9b62-40a7-95d5-7725a39f64a3"). InnerVolumeSpecName "kube-api-access-kpr5v". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 20:30:03 crc kubenswrapper[4701]: I1121 20:30:03.808998 4701 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cec9cb37-9b62-40a7-95d5-7725a39f64a3-config-volume\") on node \"crc\" DevicePath \"\"" Nov 21 20:30:03 crc kubenswrapper[4701]: I1121 20:30:03.809053 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kpr5v\" (UniqueName: \"kubernetes.io/projected/cec9cb37-9b62-40a7-95d5-7725a39f64a3-kube-api-access-kpr5v\") on node \"crc\" DevicePath \"\"" Nov 21 20:30:03 crc kubenswrapper[4701]: I1121 20:30:03.809074 4701 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cec9cb37-9b62-40a7-95d5-7725a39f64a3-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 21 20:30:04 crc kubenswrapper[4701]: I1121 20:30:04.141996 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395950-prkc6" event={"ID":"cec9cb37-9b62-40a7-95d5-7725a39f64a3","Type":"ContainerDied","Data":"910a70737a391fbcbb2cb7b62fe722e6717e5ad45a01acc6ef28de45a8c04a5e"} Nov 21 20:30:04 crc kubenswrapper[4701]: I1121 20:30:04.142059 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="910a70737a391fbcbb2cb7b62fe722e6717e5ad45a01acc6ef28de45a8c04a5e" Nov 21 20:30:04 crc kubenswrapper[4701]: I1121 20:30:04.142058 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395950-prkc6" Nov 21 20:30:04 crc kubenswrapper[4701]: I1121 20:30:04.666697 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395905-v6sr6"] Nov 21 20:30:04 crc kubenswrapper[4701]: I1121 20:30:04.681820 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395905-v6sr6"] Nov 21 20:30:05 crc kubenswrapper[4701]: I1121 20:30:05.964123 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210c4103-e8cf-4b16-bb3b-2363cb5d24e6" path="/var/lib/kubelet/pods/210c4103-e8cf-4b16-bb3b-2363cb5d24e6/volumes" Nov 21 20:30:10 crc kubenswrapper[4701]: I1121 20:30:10.951741 4701 scope.go:117] "RemoveContainer" containerID="4f3ba76f10556bd702c86481c6c460b3f01a37c542d060ca61ebacc11459c7ad" Nov 21 20:30:10 crc kubenswrapper[4701]: E1121 20:30:10.952514 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:30:12 crc kubenswrapper[4701]: I1121 20:30:12.596545 4701 scope.go:117] "RemoveContainer" containerID="5a4aac1587422cb8ed11599ae79ddc74b896e58b323d71c549d742191a31c9bc" Nov 21 20:30:25 crc kubenswrapper[4701]: I1121 20:30:25.951717 4701 scope.go:117] "RemoveContainer" containerID="4f3ba76f10556bd702c86481c6c460b3f01a37c542d060ca61ebacc11459c7ad" Nov 21 20:30:25 crc kubenswrapper[4701]: E1121 20:30:25.952546 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:30:40 crc kubenswrapper[4701]: I1121 20:30:40.951349 4701 scope.go:117] "RemoveContainer" containerID="4f3ba76f10556bd702c86481c6c460b3f01a37c542d060ca61ebacc11459c7ad" Nov 21 20:30:40 crc kubenswrapper[4701]: E1121 20:30:40.952315 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:30:52 crc kubenswrapper[4701]: I1121 20:30:52.952994 4701 scope.go:117] "RemoveContainer" containerID="4f3ba76f10556bd702c86481c6c460b3f01a37c542d060ca61ebacc11459c7ad" Nov 21 20:30:52 crc kubenswrapper[4701]: E1121 20:30:52.954534 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:31:07 crc kubenswrapper[4701]: I1121 20:31:07.952188 4701 scope.go:117] "RemoveContainer" containerID="4f3ba76f10556bd702c86481c6c460b3f01a37c542d060ca61ebacc11459c7ad" Nov 21 20:31:07 crc kubenswrapper[4701]: E1121 20:31:07.952949 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:31:20 crc kubenswrapper[4701]: I1121 20:31:20.951559 4701 scope.go:117] "RemoveContainer" containerID="4f3ba76f10556bd702c86481c6c460b3f01a37c542d060ca61ebacc11459c7ad" Nov 21 20:31:22 crc kubenswrapper[4701]: I1121 20:31:22.115984 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerStarted","Data":"d5dc02419272f9e53951b6370a00d7483b73cabdde05d29b33488b3f2b2da447"} Nov 21 20:31:47 crc kubenswrapper[4701]: I1121 20:31:47.047019 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-rhz7j"] Nov 21 20:31:47 crc kubenswrapper[4701]: E1121 20:31:47.048661 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cec9cb37-9b62-40a7-95d5-7725a39f64a3" containerName="collect-profiles" Nov 21 20:31:47 crc kubenswrapper[4701]: I1121 20:31:47.048678 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="cec9cb37-9b62-40a7-95d5-7725a39f64a3" containerName="collect-profiles" Nov 21 20:31:47 crc kubenswrapper[4701]: I1121 20:31:47.049605 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="cec9cb37-9b62-40a7-95d5-7725a39f64a3" 
containerName="collect-profiles" Nov 21 20:31:47 crc kubenswrapper[4701]: I1121 20:31:47.051462 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rhz7j" Nov 21 20:31:47 crc kubenswrapper[4701]: I1121 20:31:47.064441 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rhz7j"] Nov 21 20:31:47 crc kubenswrapper[4701]: I1121 20:31:47.202529 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9130db48-13ec-46ec-9c4c-42ad92f93102-utilities\") pod \"redhat-operators-rhz7j\" (UID: \"9130db48-13ec-46ec-9c4c-42ad92f93102\") " pod="openshift-marketplace/redhat-operators-rhz7j" Nov 21 20:31:47 crc kubenswrapper[4701]: I1121 20:31:47.203002 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9130db48-13ec-46ec-9c4c-42ad92f93102-catalog-content\") pod \"redhat-operators-rhz7j\" (UID: \"9130db48-13ec-46ec-9c4c-42ad92f93102\") " pod="openshift-marketplace/redhat-operators-rhz7j" Nov 21 20:31:47 crc kubenswrapper[4701]: I1121 20:31:47.203093 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-64tlk\" (UniqueName: \"kubernetes.io/projected/9130db48-13ec-46ec-9c4c-42ad92f93102-kube-api-access-64tlk\") pod \"redhat-operators-rhz7j\" (UID: \"9130db48-13ec-46ec-9c4c-42ad92f93102\") " pod="openshift-marketplace/redhat-operators-rhz7j" Nov 21 20:31:47 crc kubenswrapper[4701]: I1121 20:31:47.305482 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9130db48-13ec-46ec-9c4c-42ad92f93102-catalog-content\") pod \"redhat-operators-rhz7j\" (UID: \"9130db48-13ec-46ec-9c4c-42ad92f93102\") " pod="openshift-marketplace/redhat-operators-rhz7j" Nov 21 20:31:47 crc kubenswrapper[4701]: I1121 20:31:47.305638 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-64tlk\" (UniqueName: \"kubernetes.io/projected/9130db48-13ec-46ec-9c4c-42ad92f93102-kube-api-access-64tlk\") pod \"redhat-operators-rhz7j\" (UID: \"9130db48-13ec-46ec-9c4c-42ad92f93102\") " pod="openshift-marketplace/redhat-operators-rhz7j" Nov 21 20:31:47 crc kubenswrapper[4701]: I1121 20:31:47.306051 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9130db48-13ec-46ec-9c4c-42ad92f93102-catalog-content\") pod \"redhat-operators-rhz7j\" (UID: \"9130db48-13ec-46ec-9c4c-42ad92f93102\") " pod="openshift-marketplace/redhat-operators-rhz7j" Nov 21 20:31:47 crc kubenswrapper[4701]: I1121 20:31:47.306088 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9130db48-13ec-46ec-9c4c-42ad92f93102-utilities\") pod \"redhat-operators-rhz7j\" (UID: \"9130db48-13ec-46ec-9c4c-42ad92f93102\") " pod="openshift-marketplace/redhat-operators-rhz7j" Nov 21 20:31:47 crc kubenswrapper[4701]: I1121 20:31:47.306527 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9130db48-13ec-46ec-9c4c-42ad92f93102-utilities\") pod \"redhat-operators-rhz7j\" (UID: \"9130db48-13ec-46ec-9c4c-42ad92f93102\") " pod="openshift-marketplace/redhat-operators-rhz7j" Nov 
21 20:31:47 crc kubenswrapper[4701]: I1121 20:31:47.800224 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-64tlk\" (UniqueName: \"kubernetes.io/projected/9130db48-13ec-46ec-9c4c-42ad92f93102-kube-api-access-64tlk\") pod \"redhat-operators-rhz7j\" (UID: \"9130db48-13ec-46ec-9c4c-42ad92f93102\") " pod="openshift-marketplace/redhat-operators-rhz7j" Nov 21 20:31:47 crc kubenswrapper[4701]: I1121 20:31:47.997910 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rhz7j" Nov 21 20:31:48 crc kubenswrapper[4701]: I1121 20:31:48.556667 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rhz7j"] Nov 21 20:31:49 crc kubenswrapper[4701]: I1121 20:31:49.461883 4701 generic.go:334] "Generic (PLEG): container finished" podID="9130db48-13ec-46ec-9c4c-42ad92f93102" containerID="c15029d6763d858e93f941b90c1a2c126401f17d1c0d71c6e4b78c2eadfe92d8" exitCode=0 Nov 21 20:31:49 crc kubenswrapper[4701]: I1121 20:31:49.461985 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rhz7j" event={"ID":"9130db48-13ec-46ec-9c4c-42ad92f93102","Type":"ContainerDied","Data":"c15029d6763d858e93f941b90c1a2c126401f17d1c0d71c6e4b78c2eadfe92d8"} Nov 21 20:31:49 crc kubenswrapper[4701]: I1121 20:31:49.462418 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rhz7j" event={"ID":"9130db48-13ec-46ec-9c4c-42ad92f93102","Type":"ContainerStarted","Data":"6c94dddef437eccc0ef37d520eeb5380631689a3251f0be8e9552033f580efa1"} Nov 21 20:31:50 crc kubenswrapper[4701]: I1121 20:31:50.475477 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rhz7j" event={"ID":"9130db48-13ec-46ec-9c4c-42ad92f93102","Type":"ContainerStarted","Data":"abe8768c7468c840ecede780405b72a64ce497976d844416101ce94663b1e209"} Nov 21 20:31:54 crc kubenswrapper[4701]: I1121 20:31:54.535608 4701 generic.go:334] "Generic (PLEG): container finished" podID="9130db48-13ec-46ec-9c4c-42ad92f93102" containerID="abe8768c7468c840ecede780405b72a64ce497976d844416101ce94663b1e209" exitCode=0 Nov 21 20:31:54 crc kubenswrapper[4701]: I1121 20:31:54.535744 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rhz7j" event={"ID":"9130db48-13ec-46ec-9c4c-42ad92f93102","Type":"ContainerDied","Data":"abe8768c7468c840ecede780405b72a64ce497976d844416101ce94663b1e209"} Nov 21 20:31:55 crc kubenswrapper[4701]: I1121 20:31:55.550977 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rhz7j" event={"ID":"9130db48-13ec-46ec-9c4c-42ad92f93102","Type":"ContainerStarted","Data":"a3990c717ca88ed59f87ad66ba35e59338714cb8139857264d349f5de969dcd8"} Nov 21 20:31:55 crc kubenswrapper[4701]: I1121 20:31:55.582762 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-rhz7j" podStartSLOduration=3.114958146 podStartE2EDuration="8.582740685s" podCreationTimestamp="2025-11-21 20:31:47 +0000 UTC" firstStartedPulling="2025-11-21 20:31:49.465921924 +0000 UTC m=+5400.251061981" lastFinishedPulling="2025-11-21 20:31:54.933704493 +0000 UTC m=+5405.718844520" observedRunningTime="2025-11-21 20:31:55.579039686 +0000 UTC m=+5406.364179753" watchObservedRunningTime="2025-11-21 20:31:55.582740685 +0000 UTC m=+5406.367880722" Nov 21 20:31:57 crc kubenswrapper[4701]: I1121 
20:31:57.998148 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-rhz7j" Nov 21 20:31:58 crc kubenswrapper[4701]: I1121 20:31:57.998708 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-rhz7j" Nov 21 20:31:59 crc kubenswrapper[4701]: I1121 20:31:59.075843 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-rhz7j" podUID="9130db48-13ec-46ec-9c4c-42ad92f93102" containerName="registry-server" probeResult="failure" output=< Nov 21 20:31:59 crc kubenswrapper[4701]: timeout: failed to connect service ":50051" within 1s Nov 21 20:31:59 crc kubenswrapper[4701]: > Nov 21 20:32:08 crc kubenswrapper[4701]: I1121 20:32:08.054678 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-rhz7j" Nov 21 20:32:08 crc kubenswrapper[4701]: I1121 20:32:08.114605 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-rhz7j" Nov 21 20:32:08 crc kubenswrapper[4701]: I1121 20:32:08.305067 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rhz7j"] Nov 21 20:32:09 crc kubenswrapper[4701]: I1121 20:32:09.728956 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-rhz7j" podUID="9130db48-13ec-46ec-9c4c-42ad92f93102" containerName="registry-server" containerID="cri-o://a3990c717ca88ed59f87ad66ba35e59338714cb8139857264d349f5de969dcd8" gracePeriod=2 Nov 21 20:32:10 crc kubenswrapper[4701]: I1121 20:32:10.303944 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rhz7j" Nov 21 20:32:10 crc kubenswrapper[4701]: I1121 20:32:10.496996 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-64tlk\" (UniqueName: \"kubernetes.io/projected/9130db48-13ec-46ec-9c4c-42ad92f93102-kube-api-access-64tlk\") pod \"9130db48-13ec-46ec-9c4c-42ad92f93102\" (UID: \"9130db48-13ec-46ec-9c4c-42ad92f93102\") " Nov 21 20:32:10 crc kubenswrapper[4701]: I1121 20:32:10.497243 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9130db48-13ec-46ec-9c4c-42ad92f93102-utilities\") pod \"9130db48-13ec-46ec-9c4c-42ad92f93102\" (UID: \"9130db48-13ec-46ec-9c4c-42ad92f93102\") " Nov 21 20:32:10 crc kubenswrapper[4701]: I1121 20:32:10.497289 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9130db48-13ec-46ec-9c4c-42ad92f93102-catalog-content\") pod \"9130db48-13ec-46ec-9c4c-42ad92f93102\" (UID: \"9130db48-13ec-46ec-9c4c-42ad92f93102\") " Nov 21 20:32:10 crc kubenswrapper[4701]: I1121 20:32:10.498517 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9130db48-13ec-46ec-9c4c-42ad92f93102-utilities" (OuterVolumeSpecName: "utilities") pod "9130db48-13ec-46ec-9c4c-42ad92f93102" (UID: "9130db48-13ec-46ec-9c4c-42ad92f93102"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:32:10 crc kubenswrapper[4701]: I1121 20:32:10.506640 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9130db48-13ec-46ec-9c4c-42ad92f93102-kube-api-access-64tlk" (OuterVolumeSpecName: "kube-api-access-64tlk") pod "9130db48-13ec-46ec-9c4c-42ad92f93102" (UID: "9130db48-13ec-46ec-9c4c-42ad92f93102"). InnerVolumeSpecName "kube-api-access-64tlk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 20:32:10 crc kubenswrapper[4701]: I1121 20:32:10.601857 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-64tlk\" (UniqueName: \"kubernetes.io/projected/9130db48-13ec-46ec-9c4c-42ad92f93102-kube-api-access-64tlk\") on node \"crc\" DevicePath \"\"" Nov 21 20:32:10 crc kubenswrapper[4701]: I1121 20:32:10.601911 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9130db48-13ec-46ec-9c4c-42ad92f93102-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 20:32:10 crc kubenswrapper[4701]: I1121 20:32:10.607524 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9130db48-13ec-46ec-9c4c-42ad92f93102-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9130db48-13ec-46ec-9c4c-42ad92f93102" (UID: "9130db48-13ec-46ec-9c4c-42ad92f93102"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:32:10 crc kubenswrapper[4701]: I1121 20:32:10.703792 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9130db48-13ec-46ec-9c4c-42ad92f93102-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 20:32:10 crc kubenswrapper[4701]: I1121 20:32:10.742814 4701 generic.go:334] "Generic (PLEG): container finished" podID="9130db48-13ec-46ec-9c4c-42ad92f93102" containerID="a3990c717ca88ed59f87ad66ba35e59338714cb8139857264d349f5de969dcd8" exitCode=0 Nov 21 20:32:10 crc kubenswrapper[4701]: I1121 20:32:10.742893 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rhz7j" event={"ID":"9130db48-13ec-46ec-9c4c-42ad92f93102","Type":"ContainerDied","Data":"a3990c717ca88ed59f87ad66ba35e59338714cb8139857264d349f5de969dcd8"} Nov 21 20:32:10 crc kubenswrapper[4701]: I1121 20:32:10.742928 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-rhz7j" Nov 21 20:32:10 crc kubenswrapper[4701]: I1121 20:32:10.742961 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rhz7j" event={"ID":"9130db48-13ec-46ec-9c4c-42ad92f93102","Type":"ContainerDied","Data":"6c94dddef437eccc0ef37d520eeb5380631689a3251f0be8e9552033f580efa1"} Nov 21 20:32:10 crc kubenswrapper[4701]: I1121 20:32:10.743001 4701 scope.go:117] "RemoveContainer" containerID="a3990c717ca88ed59f87ad66ba35e59338714cb8139857264d349f5de969dcd8" Nov 21 20:32:10 crc kubenswrapper[4701]: I1121 20:32:10.764387 4701 scope.go:117] "RemoveContainer" containerID="abe8768c7468c840ecede780405b72a64ce497976d844416101ce94663b1e209" Nov 21 20:32:10 crc kubenswrapper[4701]: I1121 20:32:10.789404 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rhz7j"] Nov 21 20:32:10 crc kubenswrapper[4701]: I1121 20:32:10.801967 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-rhz7j"] Nov 21 20:32:10 crc kubenswrapper[4701]: I1121 20:32:10.814626 4701 scope.go:117] "RemoveContainer" containerID="c15029d6763d858e93f941b90c1a2c126401f17d1c0d71c6e4b78c2eadfe92d8" Nov 21 20:32:10 crc kubenswrapper[4701]: I1121 20:32:10.880562 4701 scope.go:117] "RemoveContainer" containerID="a3990c717ca88ed59f87ad66ba35e59338714cb8139857264d349f5de969dcd8" Nov 21 20:32:10 crc kubenswrapper[4701]: E1121 20:32:10.881085 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a3990c717ca88ed59f87ad66ba35e59338714cb8139857264d349f5de969dcd8\": container with ID starting with a3990c717ca88ed59f87ad66ba35e59338714cb8139857264d349f5de969dcd8 not found: ID does not exist" containerID="a3990c717ca88ed59f87ad66ba35e59338714cb8139857264d349f5de969dcd8" Nov 21 20:32:10 crc kubenswrapper[4701]: I1121 20:32:10.881129 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a3990c717ca88ed59f87ad66ba35e59338714cb8139857264d349f5de969dcd8"} err="failed to get container status \"a3990c717ca88ed59f87ad66ba35e59338714cb8139857264d349f5de969dcd8\": rpc error: code = NotFound desc = could not find container \"a3990c717ca88ed59f87ad66ba35e59338714cb8139857264d349f5de969dcd8\": container with ID starting with a3990c717ca88ed59f87ad66ba35e59338714cb8139857264d349f5de969dcd8 not found: ID does not exist" Nov 21 20:32:10 crc kubenswrapper[4701]: I1121 20:32:10.881156 4701 scope.go:117] "RemoveContainer" containerID="abe8768c7468c840ecede780405b72a64ce497976d844416101ce94663b1e209" Nov 21 20:32:10 crc kubenswrapper[4701]: E1121 20:32:10.881572 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"abe8768c7468c840ecede780405b72a64ce497976d844416101ce94663b1e209\": container with ID starting with abe8768c7468c840ecede780405b72a64ce497976d844416101ce94663b1e209 not found: ID does not exist" containerID="abe8768c7468c840ecede780405b72a64ce497976d844416101ce94663b1e209" Nov 21 20:32:10 crc kubenswrapper[4701]: I1121 20:32:10.881600 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"abe8768c7468c840ecede780405b72a64ce497976d844416101ce94663b1e209"} err="failed to get container status \"abe8768c7468c840ecede780405b72a64ce497976d844416101ce94663b1e209\": rpc error: code = NotFound desc = could not find container 
\"abe8768c7468c840ecede780405b72a64ce497976d844416101ce94663b1e209\": container with ID starting with abe8768c7468c840ecede780405b72a64ce497976d844416101ce94663b1e209 not found: ID does not exist" Nov 21 20:32:10 crc kubenswrapper[4701]: I1121 20:32:10.881618 4701 scope.go:117] "RemoveContainer" containerID="c15029d6763d858e93f941b90c1a2c126401f17d1c0d71c6e4b78c2eadfe92d8" Nov 21 20:32:10 crc kubenswrapper[4701]: E1121 20:32:10.882090 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c15029d6763d858e93f941b90c1a2c126401f17d1c0d71c6e4b78c2eadfe92d8\": container with ID starting with c15029d6763d858e93f941b90c1a2c126401f17d1c0d71c6e4b78c2eadfe92d8 not found: ID does not exist" containerID="c15029d6763d858e93f941b90c1a2c126401f17d1c0d71c6e4b78c2eadfe92d8" Nov 21 20:32:10 crc kubenswrapper[4701]: I1121 20:32:10.882114 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c15029d6763d858e93f941b90c1a2c126401f17d1c0d71c6e4b78c2eadfe92d8"} err="failed to get container status \"c15029d6763d858e93f941b90c1a2c126401f17d1c0d71c6e4b78c2eadfe92d8\": rpc error: code = NotFound desc = could not find container \"c15029d6763d858e93f941b90c1a2c126401f17d1c0d71c6e4b78c2eadfe92d8\": container with ID starting with c15029d6763d858e93f941b90c1a2c126401f17d1c0d71c6e4b78c2eadfe92d8 not found: ID does not exist" Nov 21 20:32:11 crc kubenswrapper[4701]: I1121 20:32:11.974442 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9130db48-13ec-46ec-9c4c-42ad92f93102" path="/var/lib/kubelet/pods/9130db48-13ec-46ec-9c4c-42ad92f93102/volumes" Nov 21 20:33:32 crc kubenswrapper[4701]: I1121 20:33:32.665130 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-mpwmt"] Nov 21 20:33:32 crc kubenswrapper[4701]: E1121 20:33:32.666372 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9130db48-13ec-46ec-9c4c-42ad92f93102" containerName="registry-server" Nov 21 20:33:32 crc kubenswrapper[4701]: I1121 20:33:32.666393 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="9130db48-13ec-46ec-9c4c-42ad92f93102" containerName="registry-server" Nov 21 20:33:32 crc kubenswrapper[4701]: E1121 20:33:32.666422 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9130db48-13ec-46ec-9c4c-42ad92f93102" containerName="extract-content" Nov 21 20:33:32 crc kubenswrapper[4701]: I1121 20:33:32.666436 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="9130db48-13ec-46ec-9c4c-42ad92f93102" containerName="extract-content" Nov 21 20:33:32 crc kubenswrapper[4701]: E1121 20:33:32.666488 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9130db48-13ec-46ec-9c4c-42ad92f93102" containerName="extract-utilities" Nov 21 20:33:32 crc kubenswrapper[4701]: I1121 20:33:32.666500 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="9130db48-13ec-46ec-9c4c-42ad92f93102" containerName="extract-utilities" Nov 21 20:33:32 crc kubenswrapper[4701]: I1121 20:33:32.666858 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="9130db48-13ec-46ec-9c4c-42ad92f93102" containerName="registry-server" Nov 21 20:33:32 crc kubenswrapper[4701]: I1121 20:33:32.669394 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mpwmt" Nov 21 20:33:32 crc kubenswrapper[4701]: I1121 20:33:32.677054 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mpwmt"] Nov 21 20:33:32 crc kubenswrapper[4701]: I1121 20:33:32.723926 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c58ffda-2863-4229-b4cd-cb93f106e94b-utilities\") pod \"community-operators-mpwmt\" (UID: \"5c58ffda-2863-4229-b4cd-cb93f106e94b\") " pod="openshift-marketplace/community-operators-mpwmt" Nov 21 20:33:32 crc kubenswrapper[4701]: I1121 20:33:32.723987 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c58ffda-2863-4229-b4cd-cb93f106e94b-catalog-content\") pod \"community-operators-mpwmt\" (UID: \"5c58ffda-2863-4229-b4cd-cb93f106e94b\") " pod="openshift-marketplace/community-operators-mpwmt" Nov 21 20:33:32 crc kubenswrapper[4701]: I1121 20:33:32.724721 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xpld7\" (UniqueName: \"kubernetes.io/projected/5c58ffda-2863-4229-b4cd-cb93f106e94b-kube-api-access-xpld7\") pod \"community-operators-mpwmt\" (UID: \"5c58ffda-2863-4229-b4cd-cb93f106e94b\") " pod="openshift-marketplace/community-operators-mpwmt" Nov 21 20:33:32 crc kubenswrapper[4701]: I1121 20:33:32.827417 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c58ffda-2863-4229-b4cd-cb93f106e94b-utilities\") pod \"community-operators-mpwmt\" (UID: \"5c58ffda-2863-4229-b4cd-cb93f106e94b\") " pod="openshift-marketplace/community-operators-mpwmt" Nov 21 20:33:32 crc kubenswrapper[4701]: I1121 20:33:32.827479 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c58ffda-2863-4229-b4cd-cb93f106e94b-catalog-content\") pod \"community-operators-mpwmt\" (UID: \"5c58ffda-2863-4229-b4cd-cb93f106e94b\") " pod="openshift-marketplace/community-operators-mpwmt" Nov 21 20:33:32 crc kubenswrapper[4701]: I1121 20:33:32.827724 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xpld7\" (UniqueName: \"kubernetes.io/projected/5c58ffda-2863-4229-b4cd-cb93f106e94b-kube-api-access-xpld7\") pod \"community-operators-mpwmt\" (UID: \"5c58ffda-2863-4229-b4cd-cb93f106e94b\") " pod="openshift-marketplace/community-operators-mpwmt" Nov 21 20:33:32 crc kubenswrapper[4701]: I1121 20:33:32.828777 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c58ffda-2863-4229-b4cd-cb93f106e94b-utilities\") pod \"community-operators-mpwmt\" (UID: \"5c58ffda-2863-4229-b4cd-cb93f106e94b\") " pod="openshift-marketplace/community-operators-mpwmt" Nov 21 20:33:32 crc kubenswrapper[4701]: I1121 20:33:32.829224 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c58ffda-2863-4229-b4cd-cb93f106e94b-catalog-content\") pod \"community-operators-mpwmt\" (UID: \"5c58ffda-2863-4229-b4cd-cb93f106e94b\") " pod="openshift-marketplace/community-operators-mpwmt" Nov 21 20:33:32 crc kubenswrapper[4701]: I1121 20:33:32.863634 4701 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-xpld7\" (UniqueName: \"kubernetes.io/projected/5c58ffda-2863-4229-b4cd-cb93f106e94b-kube-api-access-xpld7\") pod \"community-operators-mpwmt\" (UID: \"5c58ffda-2863-4229-b4cd-cb93f106e94b\") " pod="openshift-marketplace/community-operators-mpwmt" Nov 21 20:33:33 crc kubenswrapper[4701]: I1121 20:33:33.005586 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mpwmt" Nov 21 20:33:33 crc kubenswrapper[4701]: I1121 20:33:33.637720 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mpwmt"] Nov 21 20:33:33 crc kubenswrapper[4701]: I1121 20:33:33.832131 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mpwmt" event={"ID":"5c58ffda-2863-4229-b4cd-cb93f106e94b","Type":"ContainerStarted","Data":"63a45d5868caeb5e4d466842d803f8b72bb210b1ce631c3369bd1238915764e4"} Nov 21 20:33:34 crc kubenswrapper[4701]: I1121 20:33:34.842966 4701 generic.go:334] "Generic (PLEG): container finished" podID="5c58ffda-2863-4229-b4cd-cb93f106e94b" containerID="e2b4e6fa00ffdf9dbfd7f103dc7942130d26d5c3aa7dee2bb71b548cc097e3b8" exitCode=0 Nov 21 20:33:34 crc kubenswrapper[4701]: I1121 20:33:34.843051 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mpwmt" event={"ID":"5c58ffda-2863-4229-b4cd-cb93f106e94b","Type":"ContainerDied","Data":"e2b4e6fa00ffdf9dbfd7f103dc7942130d26d5c3aa7dee2bb71b548cc097e3b8"} Nov 21 20:33:37 crc kubenswrapper[4701]: I1121 20:33:37.886741 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mpwmt" event={"ID":"5c58ffda-2863-4229-b4cd-cb93f106e94b","Type":"ContainerStarted","Data":"893cc48323d672b9a7a9148c0973f18661642dbec93c44542cb8d54dd962779a"} Nov 21 20:33:38 crc kubenswrapper[4701]: I1121 20:33:38.905580 4701 generic.go:334] "Generic (PLEG): container finished" podID="5c58ffda-2863-4229-b4cd-cb93f106e94b" containerID="893cc48323d672b9a7a9148c0973f18661642dbec93c44542cb8d54dd962779a" exitCode=0 Nov 21 20:33:38 crc kubenswrapper[4701]: I1121 20:33:38.905657 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mpwmt" event={"ID":"5c58ffda-2863-4229-b4cd-cb93f106e94b","Type":"ContainerDied","Data":"893cc48323d672b9a7a9148c0973f18661642dbec93c44542cb8d54dd962779a"} Nov 21 20:33:40 crc kubenswrapper[4701]: I1121 20:33:40.940292 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mpwmt" event={"ID":"5c58ffda-2863-4229-b4cd-cb93f106e94b","Type":"ContainerStarted","Data":"d3d12374d0997b45e14d55a39ef42d425b8930bb4c574ef1138c79427a6a6c17"} Nov 21 20:33:40 crc kubenswrapper[4701]: I1121 20:33:40.986906 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-mpwmt" podStartSLOduration=4.533314569 podStartE2EDuration="8.986880697s" podCreationTimestamp="2025-11-21 20:33:32 +0000 UTC" firstStartedPulling="2025-11-21 20:33:34.845587353 +0000 UTC m=+5505.630727380" lastFinishedPulling="2025-11-21 20:33:39.299153441 +0000 UTC m=+5510.084293508" observedRunningTime="2025-11-21 20:33:40.961046369 +0000 UTC m=+5511.746186396" watchObservedRunningTime="2025-11-21 20:33:40.986880697 +0000 UTC m=+5511.772020724" Nov 21 20:33:43 crc kubenswrapper[4701]: I1121 20:33:43.006671 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/community-operators-mpwmt" Nov 21 20:33:43 crc kubenswrapper[4701]: I1121 20:33:43.007239 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-mpwmt" Nov 21 20:33:43 crc kubenswrapper[4701]: I1121 20:33:43.080879 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-mpwmt" Nov 21 20:33:48 crc kubenswrapper[4701]: I1121 20:33:48.614000 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 20:33:48 crc kubenswrapper[4701]: I1121 20:33:48.614871 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 20:33:53 crc kubenswrapper[4701]: I1121 20:33:53.105387 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-mpwmt" Nov 21 20:33:53 crc kubenswrapper[4701]: I1121 20:33:53.188306 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mpwmt"] Nov 21 20:33:54 crc kubenswrapper[4701]: I1121 20:33:54.100631 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-mpwmt" podUID="5c58ffda-2863-4229-b4cd-cb93f106e94b" containerName="registry-server" containerID="cri-o://d3d12374d0997b45e14d55a39ef42d425b8930bb4c574ef1138c79427a6a6c17" gracePeriod=2 Nov 21 20:33:54 crc kubenswrapper[4701]: I1121 20:33:54.735342 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mpwmt" Nov 21 20:33:54 crc kubenswrapper[4701]: I1121 20:33:54.749937 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xpld7\" (UniqueName: \"kubernetes.io/projected/5c58ffda-2863-4229-b4cd-cb93f106e94b-kube-api-access-xpld7\") pod \"5c58ffda-2863-4229-b4cd-cb93f106e94b\" (UID: \"5c58ffda-2863-4229-b4cd-cb93f106e94b\") " Nov 21 20:33:54 crc kubenswrapper[4701]: I1121 20:33:54.750045 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c58ffda-2863-4229-b4cd-cb93f106e94b-catalog-content\") pod \"5c58ffda-2863-4229-b4cd-cb93f106e94b\" (UID: \"5c58ffda-2863-4229-b4cd-cb93f106e94b\") " Nov 21 20:33:54 crc kubenswrapper[4701]: I1121 20:33:54.750612 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c58ffda-2863-4229-b4cd-cb93f106e94b-utilities\") pod \"5c58ffda-2863-4229-b4cd-cb93f106e94b\" (UID: \"5c58ffda-2863-4229-b4cd-cb93f106e94b\") " Nov 21 20:33:54 crc kubenswrapper[4701]: I1121 20:33:54.751650 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c58ffda-2863-4229-b4cd-cb93f106e94b-utilities" (OuterVolumeSpecName: "utilities") pod "5c58ffda-2863-4229-b4cd-cb93f106e94b" (UID: "5c58ffda-2863-4229-b4cd-cb93f106e94b"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:33:54 crc kubenswrapper[4701]: I1121 20:33:54.769491 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c58ffda-2863-4229-b4cd-cb93f106e94b-kube-api-access-xpld7" (OuterVolumeSpecName: "kube-api-access-xpld7") pod "5c58ffda-2863-4229-b4cd-cb93f106e94b" (UID: "5c58ffda-2863-4229-b4cd-cb93f106e94b"). InnerVolumeSpecName "kube-api-access-xpld7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 20:33:54 crc kubenswrapper[4701]: I1121 20:33:54.810590 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c58ffda-2863-4229-b4cd-cb93f106e94b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5c58ffda-2863-4229-b4cd-cb93f106e94b" (UID: "5c58ffda-2863-4229-b4cd-cb93f106e94b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:33:54 crc kubenswrapper[4701]: I1121 20:33:54.854379 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c58ffda-2863-4229-b4cd-cb93f106e94b-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 20:33:54 crc kubenswrapper[4701]: I1121 20:33:54.854434 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xpld7\" (UniqueName: \"kubernetes.io/projected/5c58ffda-2863-4229-b4cd-cb93f106e94b-kube-api-access-xpld7\") on node \"crc\" DevicePath \"\"" Nov 21 20:33:54 crc kubenswrapper[4701]: I1121 20:33:54.854453 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c58ffda-2863-4229-b4cd-cb93f106e94b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 20:33:55 crc kubenswrapper[4701]: I1121 20:33:55.112782 4701 generic.go:334] "Generic (PLEG): container finished" podID="5c58ffda-2863-4229-b4cd-cb93f106e94b" containerID="d3d12374d0997b45e14d55a39ef42d425b8930bb4c574ef1138c79427a6a6c17" exitCode=0 Nov 21 20:33:55 crc kubenswrapper[4701]: I1121 20:33:55.112851 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mpwmt" event={"ID":"5c58ffda-2863-4229-b4cd-cb93f106e94b","Type":"ContainerDied","Data":"d3d12374d0997b45e14d55a39ef42d425b8930bb4c574ef1138c79427a6a6c17"} Nov 21 20:33:55 crc kubenswrapper[4701]: I1121 20:33:55.112889 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mpwmt" Nov 21 20:33:55 crc kubenswrapper[4701]: I1121 20:33:55.112913 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mpwmt" event={"ID":"5c58ffda-2863-4229-b4cd-cb93f106e94b","Type":"ContainerDied","Data":"63a45d5868caeb5e4d466842d803f8b72bb210b1ce631c3369bd1238915764e4"} Nov 21 20:33:55 crc kubenswrapper[4701]: I1121 20:33:55.112942 4701 scope.go:117] "RemoveContainer" containerID="d3d12374d0997b45e14d55a39ef42d425b8930bb4c574ef1138c79427a6a6c17" Nov 21 20:33:55 crc kubenswrapper[4701]: I1121 20:33:55.139591 4701 scope.go:117] "RemoveContainer" containerID="893cc48323d672b9a7a9148c0973f18661642dbec93c44542cb8d54dd962779a" Nov 21 20:33:55 crc kubenswrapper[4701]: I1121 20:33:55.170542 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mpwmt"] Nov 21 20:33:55 crc kubenswrapper[4701]: I1121 20:33:55.183797 4701 scope.go:117] "RemoveContainer" containerID="e2b4e6fa00ffdf9dbfd7f103dc7942130d26d5c3aa7dee2bb71b548cc097e3b8" Nov 21 20:33:55 crc kubenswrapper[4701]: I1121 20:33:55.189946 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-mpwmt"] Nov 21 20:33:55 crc kubenswrapper[4701]: I1121 20:33:55.240973 4701 scope.go:117] "RemoveContainer" containerID="d3d12374d0997b45e14d55a39ef42d425b8930bb4c574ef1138c79427a6a6c17" Nov 21 20:33:55 crc kubenswrapper[4701]: E1121 20:33:55.242221 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d3d12374d0997b45e14d55a39ef42d425b8930bb4c574ef1138c79427a6a6c17\": container with ID starting with d3d12374d0997b45e14d55a39ef42d425b8930bb4c574ef1138c79427a6a6c17 not found: ID does not exist" containerID="d3d12374d0997b45e14d55a39ef42d425b8930bb4c574ef1138c79427a6a6c17" Nov 21 20:33:55 crc kubenswrapper[4701]: I1121 20:33:55.242263 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d3d12374d0997b45e14d55a39ef42d425b8930bb4c574ef1138c79427a6a6c17"} err="failed to get container status \"d3d12374d0997b45e14d55a39ef42d425b8930bb4c574ef1138c79427a6a6c17\": rpc error: code = NotFound desc = could not find container \"d3d12374d0997b45e14d55a39ef42d425b8930bb4c574ef1138c79427a6a6c17\": container with ID starting with d3d12374d0997b45e14d55a39ef42d425b8930bb4c574ef1138c79427a6a6c17 not found: ID does not exist" Nov 21 20:33:55 crc kubenswrapper[4701]: I1121 20:33:55.242288 4701 scope.go:117] "RemoveContainer" containerID="893cc48323d672b9a7a9148c0973f18661642dbec93c44542cb8d54dd962779a" Nov 21 20:33:55 crc kubenswrapper[4701]: E1121 20:33:55.243221 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"893cc48323d672b9a7a9148c0973f18661642dbec93c44542cb8d54dd962779a\": container with ID starting with 893cc48323d672b9a7a9148c0973f18661642dbec93c44542cb8d54dd962779a not found: ID does not exist" containerID="893cc48323d672b9a7a9148c0973f18661642dbec93c44542cb8d54dd962779a" Nov 21 20:33:55 crc kubenswrapper[4701]: I1121 20:33:55.243278 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"893cc48323d672b9a7a9148c0973f18661642dbec93c44542cb8d54dd962779a"} err="failed to get container status \"893cc48323d672b9a7a9148c0973f18661642dbec93c44542cb8d54dd962779a\": rpc error: code = NotFound desc = could not find 
container \"893cc48323d672b9a7a9148c0973f18661642dbec93c44542cb8d54dd962779a\": container with ID starting with 893cc48323d672b9a7a9148c0973f18661642dbec93c44542cb8d54dd962779a not found: ID does not exist" Nov 21 20:33:55 crc kubenswrapper[4701]: I1121 20:33:55.243310 4701 scope.go:117] "RemoveContainer" containerID="e2b4e6fa00ffdf9dbfd7f103dc7942130d26d5c3aa7dee2bb71b548cc097e3b8" Nov 21 20:33:55 crc kubenswrapper[4701]: E1121 20:33:55.243996 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e2b4e6fa00ffdf9dbfd7f103dc7942130d26d5c3aa7dee2bb71b548cc097e3b8\": container with ID starting with e2b4e6fa00ffdf9dbfd7f103dc7942130d26d5c3aa7dee2bb71b548cc097e3b8 not found: ID does not exist" containerID="e2b4e6fa00ffdf9dbfd7f103dc7942130d26d5c3aa7dee2bb71b548cc097e3b8" Nov 21 20:33:55 crc kubenswrapper[4701]: I1121 20:33:55.244068 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e2b4e6fa00ffdf9dbfd7f103dc7942130d26d5c3aa7dee2bb71b548cc097e3b8"} err="failed to get container status \"e2b4e6fa00ffdf9dbfd7f103dc7942130d26d5c3aa7dee2bb71b548cc097e3b8\": rpc error: code = NotFound desc = could not find container \"e2b4e6fa00ffdf9dbfd7f103dc7942130d26d5c3aa7dee2bb71b548cc097e3b8\": container with ID starting with e2b4e6fa00ffdf9dbfd7f103dc7942130d26d5c3aa7dee2bb71b548cc097e3b8 not found: ID does not exist" Nov 21 20:33:55 crc kubenswrapper[4701]: I1121 20:33:55.984137 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5c58ffda-2863-4229-b4cd-cb93f106e94b" path="/var/lib/kubelet/pods/5c58ffda-2863-4229-b4cd-cb93f106e94b/volumes" Nov 21 20:34:18 crc kubenswrapper[4701]: I1121 20:34:18.613143 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 20:34:18 crc kubenswrapper[4701]: I1121 20:34:18.614086 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 20:34:48 crc kubenswrapper[4701]: I1121 20:34:48.613585 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 20:34:48 crc kubenswrapper[4701]: I1121 20:34:48.614333 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 20:34:48 crc kubenswrapper[4701]: I1121 20:34:48.614389 4701 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" Nov 21 20:34:48 crc kubenswrapper[4701]: I1121 20:34:48.615495 4701 kuberuntime_manager.go:1027] "Message for Container of pod" 
containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d5dc02419272f9e53951b6370a00d7483b73cabdde05d29b33488b3f2b2da447"} pod="openshift-machine-config-operator/machine-config-daemon-tbszf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 20:34:48 crc kubenswrapper[4701]: I1121 20:34:48.615580 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" containerID="cri-o://d5dc02419272f9e53951b6370a00d7483b73cabdde05d29b33488b3f2b2da447" gracePeriod=600 Nov 21 20:34:48 crc kubenswrapper[4701]: I1121 20:34:48.825131 4701 generic.go:334] "Generic (PLEG): container finished" podID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerID="d5dc02419272f9e53951b6370a00d7483b73cabdde05d29b33488b3f2b2da447" exitCode=0 Nov 21 20:34:48 crc kubenswrapper[4701]: I1121 20:34:48.825253 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerDied","Data":"d5dc02419272f9e53951b6370a00d7483b73cabdde05d29b33488b3f2b2da447"} Nov 21 20:34:48 crc kubenswrapper[4701]: I1121 20:34:48.825661 4701 scope.go:117] "RemoveContainer" containerID="4f3ba76f10556bd702c86481c6c460b3f01a37c542d060ca61ebacc11459c7ad" Nov 21 20:34:49 crc kubenswrapper[4701]: I1121 20:34:49.841102 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerStarted","Data":"a8ef73b4a01861450b05d5f0ee923d59212be6c48e5ab1447c10cf0a3199c233"} Nov 21 20:36:22 crc kubenswrapper[4701]: I1121 20:36:22.117620 4701 generic.go:334] "Generic (PLEG): container finished" podID="6dd5f296-841e-4527-88fe-3963fef0e450" containerID="1b0522a64f939a6056e3229dbc55e26a865e231559bdefc50e4b89c5feada5a5" exitCode=0 Nov 21 20:36:22 crc kubenswrapper[4701]: I1121 20:36:22.117743 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"6dd5f296-841e-4527-88fe-3963fef0e450","Type":"ContainerDied","Data":"1b0522a64f939a6056e3229dbc55e26a865e231559bdefc50e4b89c5feada5a5"} Nov 21 20:36:23 crc kubenswrapper[4701]: I1121 20:36:23.579950 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 21 20:36:23 crc kubenswrapper[4701]: I1121 20:36:23.656858 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6dd5f296-841e-4527-88fe-3963fef0e450-ssh-key\") pod \"6dd5f296-841e-4527-88fe-3963fef0e450\" (UID: \"6dd5f296-841e-4527-88fe-3963fef0e450\") " Nov 21 20:36:23 crc kubenswrapper[4701]: I1121 20:36:23.656942 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/6dd5f296-841e-4527-88fe-3963fef0e450-openstack-config\") pod \"6dd5f296-841e-4527-88fe-3963fef0e450\" (UID: \"6dd5f296-841e-4527-88fe-3963fef0e450\") " Nov 21 20:36:23 crc kubenswrapper[4701]: I1121 20:36:23.656975 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/6dd5f296-841e-4527-88fe-3963fef0e450-openstack-config-secret\") pod \"6dd5f296-841e-4527-88fe-3963fef0e450\" (UID: \"6dd5f296-841e-4527-88fe-3963fef0e450\") " Nov 21 20:36:23 crc kubenswrapper[4701]: I1121 20:36:23.657134 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"6dd5f296-841e-4527-88fe-3963fef0e450\" (UID: \"6dd5f296-841e-4527-88fe-3963fef0e450\") " Nov 21 20:36:23 crc kubenswrapper[4701]: I1121 20:36:23.657195 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/6dd5f296-841e-4527-88fe-3963fef0e450-test-operator-ephemeral-temporary\") pod \"6dd5f296-841e-4527-88fe-3963fef0e450\" (UID: \"6dd5f296-841e-4527-88fe-3963fef0e450\") " Nov 21 20:36:23 crc kubenswrapper[4701]: I1121 20:36:23.657273 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/6dd5f296-841e-4527-88fe-3963fef0e450-test-operator-ephemeral-workdir\") pod \"6dd5f296-841e-4527-88fe-3963fef0e450\" (UID: \"6dd5f296-841e-4527-88fe-3963fef0e450\") " Nov 21 20:36:23 crc kubenswrapper[4701]: I1121 20:36:23.657400 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/6dd5f296-841e-4527-88fe-3963fef0e450-ca-certs\") pod \"6dd5f296-841e-4527-88fe-3963fef0e450\" (UID: \"6dd5f296-841e-4527-88fe-3963fef0e450\") " Nov 21 20:36:23 crc kubenswrapper[4701]: I1121 20:36:23.657551 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dd49g\" (UniqueName: \"kubernetes.io/projected/6dd5f296-841e-4527-88fe-3963fef0e450-kube-api-access-dd49g\") pod \"6dd5f296-841e-4527-88fe-3963fef0e450\" (UID: \"6dd5f296-841e-4527-88fe-3963fef0e450\") " Nov 21 20:36:23 crc kubenswrapper[4701]: I1121 20:36:23.657706 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6dd5f296-841e-4527-88fe-3963fef0e450-config-data\") pod \"6dd5f296-841e-4527-88fe-3963fef0e450\" (UID: \"6dd5f296-841e-4527-88fe-3963fef0e450\") " Nov 21 20:36:23 crc kubenswrapper[4701]: I1121 20:36:23.658677 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6dd5f296-841e-4527-88fe-3963fef0e450-test-operator-ephemeral-temporary" (OuterVolumeSpecName: 
"test-operator-ephemeral-temporary") pod "6dd5f296-841e-4527-88fe-3963fef0e450" (UID: "6dd5f296-841e-4527-88fe-3963fef0e450"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:36:23 crc kubenswrapper[4701]: I1121 20:36:23.659983 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6dd5f296-841e-4527-88fe-3963fef0e450-config-data" (OuterVolumeSpecName: "config-data") pod "6dd5f296-841e-4527-88fe-3963fef0e450" (UID: "6dd5f296-841e-4527-88fe-3963fef0e450"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 20:36:23 crc kubenswrapper[4701]: I1121 20:36:23.662096 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6dd5f296-841e-4527-88fe-3963fef0e450-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "6dd5f296-841e-4527-88fe-3963fef0e450" (UID: "6dd5f296-841e-4527-88fe-3963fef0e450"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:36:23 crc kubenswrapper[4701]: I1121 20:36:23.667316 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "test-operator-logs") pod "6dd5f296-841e-4527-88fe-3963fef0e450" (UID: "6dd5f296-841e-4527-88fe-3963fef0e450"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 21 20:36:23 crc kubenswrapper[4701]: I1121 20:36:23.674413 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6dd5f296-841e-4527-88fe-3963fef0e450-kube-api-access-dd49g" (OuterVolumeSpecName: "kube-api-access-dd49g") pod "6dd5f296-841e-4527-88fe-3963fef0e450" (UID: "6dd5f296-841e-4527-88fe-3963fef0e450"). InnerVolumeSpecName "kube-api-access-dd49g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 20:36:23 crc kubenswrapper[4701]: I1121 20:36:23.692663 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6dd5f296-841e-4527-88fe-3963fef0e450-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "6dd5f296-841e-4527-88fe-3963fef0e450" (UID: "6dd5f296-841e-4527-88fe-3963fef0e450"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 20:36:23 crc kubenswrapper[4701]: I1121 20:36:23.704030 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6dd5f296-841e-4527-88fe-3963fef0e450-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "6dd5f296-841e-4527-88fe-3963fef0e450" (UID: "6dd5f296-841e-4527-88fe-3963fef0e450"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 20:36:23 crc kubenswrapper[4701]: I1121 20:36:23.718266 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6dd5f296-841e-4527-88fe-3963fef0e450-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "6dd5f296-841e-4527-88fe-3963fef0e450" (UID: "6dd5f296-841e-4527-88fe-3963fef0e450"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 20:36:23 crc kubenswrapper[4701]: I1121 20:36:23.747637 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6dd5f296-841e-4527-88fe-3963fef0e450-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "6dd5f296-841e-4527-88fe-3963fef0e450" (UID: "6dd5f296-841e-4527-88fe-3963fef0e450"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 20:36:23 crc kubenswrapper[4701]: I1121 20:36:23.760747 4701 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/6dd5f296-841e-4527-88fe-3963fef0e450-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\"" Nov 21 20:36:23 crc kubenswrapper[4701]: I1121 20:36:23.760878 4701 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/6dd5f296-841e-4527-88fe-3963fef0e450-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\"" Nov 21 20:36:23 crc kubenswrapper[4701]: I1121 20:36:23.760941 4701 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/6dd5f296-841e-4527-88fe-3963fef0e450-ca-certs\") on node \"crc\" DevicePath \"\"" Nov 21 20:36:23 crc kubenswrapper[4701]: I1121 20:36:23.760999 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dd49g\" (UniqueName: \"kubernetes.io/projected/6dd5f296-841e-4527-88fe-3963fef0e450-kube-api-access-dd49g\") on node \"crc\" DevicePath \"\"" Nov 21 20:36:23 crc kubenswrapper[4701]: I1121 20:36:23.761063 4701 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6dd5f296-841e-4527-88fe-3963fef0e450-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 20:36:23 crc kubenswrapper[4701]: I1121 20:36:23.761117 4701 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6dd5f296-841e-4527-88fe-3963fef0e450-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 20:36:23 crc kubenswrapper[4701]: I1121 20:36:23.761171 4701 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/6dd5f296-841e-4527-88fe-3963fef0e450-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 21 20:36:23 crc kubenswrapper[4701]: I1121 20:36:23.761248 4701 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/6dd5f296-841e-4527-88fe-3963fef0e450-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 21 20:36:23 crc kubenswrapper[4701]: I1121 20:36:23.761328 4701 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Nov 21 20:36:23 crc kubenswrapper[4701]: I1121 20:36:23.795167 4701 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Nov 21 20:36:23 crc kubenswrapper[4701]: I1121 20:36:23.863746 4701 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Nov 21 20:36:24 crc kubenswrapper[4701]: I1121 20:36:24.162578 4701 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack/tempest-tests-tempest" event={"ID":"6dd5f296-841e-4527-88fe-3963fef0e450","Type":"ContainerDied","Data":"b592f74d11522362fa7dfb24fd1bd1f37eff18103eb5ab2d3a13bb9827c55783"} Nov 21 20:36:24 crc kubenswrapper[4701]: I1121 20:36:24.162619 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b592f74d11522362fa7dfb24fd1bd1f37eff18103eb5ab2d3a13bb9827c55783" Nov 21 20:36:24 crc kubenswrapper[4701]: I1121 20:36:24.162747 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 21 20:36:31 crc kubenswrapper[4701]: I1121 20:36:31.211253 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 21 20:36:31 crc kubenswrapper[4701]: E1121 20:36:31.212670 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6dd5f296-841e-4527-88fe-3963fef0e450" containerName="tempest-tests-tempest-tests-runner" Nov 21 20:36:31 crc kubenswrapper[4701]: I1121 20:36:31.212690 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="6dd5f296-841e-4527-88fe-3963fef0e450" containerName="tempest-tests-tempest-tests-runner" Nov 21 20:36:31 crc kubenswrapper[4701]: E1121 20:36:31.212714 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c58ffda-2863-4229-b4cd-cb93f106e94b" containerName="registry-server" Nov 21 20:36:31 crc kubenswrapper[4701]: I1121 20:36:31.212724 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c58ffda-2863-4229-b4cd-cb93f106e94b" containerName="registry-server" Nov 21 20:36:31 crc kubenswrapper[4701]: E1121 20:36:31.212755 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c58ffda-2863-4229-b4cd-cb93f106e94b" containerName="extract-content" Nov 21 20:36:31 crc kubenswrapper[4701]: I1121 20:36:31.212763 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c58ffda-2863-4229-b4cd-cb93f106e94b" containerName="extract-content" Nov 21 20:36:31 crc kubenswrapper[4701]: E1121 20:36:31.212774 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c58ffda-2863-4229-b4cd-cb93f106e94b" containerName="extract-utilities" Nov 21 20:36:31 crc kubenswrapper[4701]: I1121 20:36:31.212782 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c58ffda-2863-4229-b4cd-cb93f106e94b" containerName="extract-utilities" Nov 21 20:36:31 crc kubenswrapper[4701]: I1121 20:36:31.213051 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="6dd5f296-841e-4527-88fe-3963fef0e450" containerName="tempest-tests-tempest-tests-runner" Nov 21 20:36:31 crc kubenswrapper[4701]: I1121 20:36:31.213086 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c58ffda-2863-4229-b4cd-cb93f106e94b" containerName="registry-server" Nov 21 20:36:31 crc kubenswrapper[4701]: I1121 20:36:31.215962 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 21 20:36:31 crc kubenswrapper[4701]: I1121 20:36:31.220152 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-qjzf9" Nov 21 20:36:31 crc kubenswrapper[4701]: I1121 20:36:31.227885 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 21 20:36:31 crc kubenswrapper[4701]: I1121 20:36:31.385273 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"e641415e-cd44-42d3-b2be-d1b45a79297a\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 21 20:36:31 crc kubenswrapper[4701]: I1121 20:36:31.385340 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s6hvk\" (UniqueName: \"kubernetes.io/projected/e641415e-cd44-42d3-b2be-d1b45a79297a-kube-api-access-s6hvk\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"e641415e-cd44-42d3-b2be-d1b45a79297a\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 21 20:36:31 crc kubenswrapper[4701]: I1121 20:36:31.488957 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"e641415e-cd44-42d3-b2be-d1b45a79297a\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 21 20:36:31 crc kubenswrapper[4701]: I1121 20:36:31.489048 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s6hvk\" (UniqueName: \"kubernetes.io/projected/e641415e-cd44-42d3-b2be-d1b45a79297a-kube-api-access-s6hvk\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"e641415e-cd44-42d3-b2be-d1b45a79297a\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 21 20:36:31 crc kubenswrapper[4701]: I1121 20:36:31.490168 4701 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"e641415e-cd44-42d3-b2be-d1b45a79297a\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 21 20:36:31 crc kubenswrapper[4701]: I1121 20:36:31.543738 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"e641415e-cd44-42d3-b2be-d1b45a79297a\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 21 20:36:31 crc kubenswrapper[4701]: I1121 20:36:31.544748 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s6hvk\" (UniqueName: \"kubernetes.io/projected/e641415e-cd44-42d3-b2be-d1b45a79297a-kube-api-access-s6hvk\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"e641415e-cd44-42d3-b2be-d1b45a79297a\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 21 20:36:31 crc 
kubenswrapper[4701]: I1121 20:36:31.558040 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 21 20:36:32 crc kubenswrapper[4701]: I1121 20:36:32.119094 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 21 20:36:32 crc kubenswrapper[4701]: W1121 20:36:32.134592 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode641415e_cd44_42d3_b2be_d1b45a79297a.slice/crio-d0b3bc282a19e3a90e0669ecb6ddb98cd33f611802dff6f85d7d8d42a8f19c7e WatchSource:0}: Error finding container d0b3bc282a19e3a90e0669ecb6ddb98cd33f611802dff6f85d7d8d42a8f19c7e: Status 404 returned error can't find the container with id d0b3bc282a19e3a90e0669ecb6ddb98cd33f611802dff6f85d7d8d42a8f19c7e Nov 21 20:36:32 crc kubenswrapper[4701]: I1121 20:36:32.150744 4701 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 20:36:32 crc kubenswrapper[4701]: I1121 20:36:32.278368 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"e641415e-cd44-42d3-b2be-d1b45a79297a","Type":"ContainerStarted","Data":"d0b3bc282a19e3a90e0669ecb6ddb98cd33f611802dff6f85d7d8d42a8f19c7e"} Nov 21 20:36:34 crc kubenswrapper[4701]: I1121 20:36:34.307096 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"e641415e-cd44-42d3-b2be-d1b45a79297a","Type":"ContainerStarted","Data":"85d17a2a81f60fa60d61141a231b59bfa8b93e3a350395f4c11bb99bca3849a2"} Nov 21 20:36:34 crc kubenswrapper[4701]: I1121 20:36:34.326483 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=2.385373832 podStartE2EDuration="3.326446335s" podCreationTimestamp="2025-11-21 20:36:31 +0000 UTC" firstStartedPulling="2025-11-21 20:36:32.150434799 +0000 UTC m=+5682.935574826" lastFinishedPulling="2025-11-21 20:36:33.091507292 +0000 UTC m=+5683.876647329" observedRunningTime="2025-11-21 20:36:34.325937602 +0000 UTC m=+5685.111077679" watchObservedRunningTime="2025-11-21 20:36:34.326446335 +0000 UTC m=+5685.111586422" Nov 21 20:36:59 crc kubenswrapper[4701]: I1121 20:36:59.223724 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-fspj5/must-gather-4xwmw"] Nov 21 20:36:59 crc kubenswrapper[4701]: I1121 20:36:59.237951 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-fspj5/must-gather-4xwmw" Nov 21 20:36:59 crc kubenswrapper[4701]: I1121 20:36:59.243096 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-fspj5"/"kube-root-ca.crt" Nov 21 20:36:59 crc kubenswrapper[4701]: I1121 20:36:59.243418 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-fspj5/must-gather-4xwmw"] Nov 21 20:36:59 crc kubenswrapper[4701]: I1121 20:36:59.244656 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-fspj5"/"openshift-service-ca.crt" Nov 21 20:36:59 crc kubenswrapper[4701]: I1121 20:36:59.259691 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-fspj5"/"default-dockercfg-8smkm" Nov 21 20:36:59 crc kubenswrapper[4701]: I1121 20:36:59.352785 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j9qww\" (UniqueName: \"kubernetes.io/projected/74b4c785-a4b8-4ac3-8906-0cb78c310784-kube-api-access-j9qww\") pod \"must-gather-4xwmw\" (UID: \"74b4c785-a4b8-4ac3-8906-0cb78c310784\") " pod="openshift-must-gather-fspj5/must-gather-4xwmw" Nov 21 20:36:59 crc kubenswrapper[4701]: I1121 20:36:59.352916 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/74b4c785-a4b8-4ac3-8906-0cb78c310784-must-gather-output\") pod \"must-gather-4xwmw\" (UID: \"74b4c785-a4b8-4ac3-8906-0cb78c310784\") " pod="openshift-must-gather-fspj5/must-gather-4xwmw" Nov 21 20:36:59 crc kubenswrapper[4701]: I1121 20:36:59.455218 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j9qww\" (UniqueName: \"kubernetes.io/projected/74b4c785-a4b8-4ac3-8906-0cb78c310784-kube-api-access-j9qww\") pod \"must-gather-4xwmw\" (UID: \"74b4c785-a4b8-4ac3-8906-0cb78c310784\") " pod="openshift-must-gather-fspj5/must-gather-4xwmw" Nov 21 20:36:59 crc kubenswrapper[4701]: I1121 20:36:59.455337 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/74b4c785-a4b8-4ac3-8906-0cb78c310784-must-gather-output\") pod \"must-gather-4xwmw\" (UID: \"74b4c785-a4b8-4ac3-8906-0cb78c310784\") " pod="openshift-must-gather-fspj5/must-gather-4xwmw" Nov 21 20:36:59 crc kubenswrapper[4701]: I1121 20:36:59.455801 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/74b4c785-a4b8-4ac3-8906-0cb78c310784-must-gather-output\") pod \"must-gather-4xwmw\" (UID: \"74b4c785-a4b8-4ac3-8906-0cb78c310784\") " pod="openshift-must-gather-fspj5/must-gather-4xwmw" Nov 21 20:36:59 crc kubenswrapper[4701]: I1121 20:36:59.480985 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j9qww\" (UniqueName: \"kubernetes.io/projected/74b4c785-a4b8-4ac3-8906-0cb78c310784-kube-api-access-j9qww\") pod \"must-gather-4xwmw\" (UID: \"74b4c785-a4b8-4ac3-8906-0cb78c310784\") " pod="openshift-must-gather-fspj5/must-gather-4xwmw" Nov 21 20:36:59 crc kubenswrapper[4701]: I1121 20:36:59.586539 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-fspj5/must-gather-4xwmw" Nov 21 20:37:00 crc kubenswrapper[4701]: I1121 20:37:00.108595 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-fspj5/must-gather-4xwmw"] Nov 21 20:37:00 crc kubenswrapper[4701]: I1121 20:37:00.680112 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-fspj5/must-gather-4xwmw" event={"ID":"74b4c785-a4b8-4ac3-8906-0cb78c310784","Type":"ContainerStarted","Data":"a98398c3c889ebbfc053b4b9e0b57bc867850383dcd93d3eb240ca3b64b01e2e"} Nov 21 20:37:08 crc kubenswrapper[4701]: I1121 20:37:08.782242 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-fspj5/must-gather-4xwmw" event={"ID":"74b4c785-a4b8-4ac3-8906-0cb78c310784","Type":"ContainerStarted","Data":"e9b1ff9923aac77509b3591b7ac949cc3883bf933afa4542e65345783276485e"} Nov 21 20:37:08 crc kubenswrapper[4701]: I1121 20:37:08.783013 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-fspj5/must-gather-4xwmw" event={"ID":"74b4c785-a4b8-4ac3-8906-0cb78c310784","Type":"ContainerStarted","Data":"0bd024b44216fe901914fac5f35fe5c15c4d4230c057eb480609d6c056bded4c"} Nov 21 20:37:08 crc kubenswrapper[4701]: I1121 20:37:08.814533 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-fspj5/must-gather-4xwmw" podStartSLOduration=1.885139973 podStartE2EDuration="9.814507385s" podCreationTimestamp="2025-11-21 20:36:59 +0000 UTC" firstStartedPulling="2025-11-21 20:37:00.118964279 +0000 UTC m=+5710.904104326" lastFinishedPulling="2025-11-21 20:37:08.048331711 +0000 UTC m=+5718.833471738" observedRunningTime="2025-11-21 20:37:08.805244919 +0000 UTC m=+5719.590384946" watchObservedRunningTime="2025-11-21 20:37:08.814507385 +0000 UTC m=+5719.599647432" Nov 21 20:37:12 crc kubenswrapper[4701]: I1121 20:37:12.588128 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-fspj5/crc-debug-pjhbf"] Nov 21 20:37:12 crc kubenswrapper[4701]: I1121 20:37:12.590398 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-fspj5/crc-debug-pjhbf" Nov 21 20:37:12 crc kubenswrapper[4701]: I1121 20:37:12.708127 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/671d85cf-cc63-4984-9d8e-e0613b699227-host\") pod \"crc-debug-pjhbf\" (UID: \"671d85cf-cc63-4984-9d8e-e0613b699227\") " pod="openshift-must-gather-fspj5/crc-debug-pjhbf" Nov 21 20:37:12 crc kubenswrapper[4701]: I1121 20:37:12.708634 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jddzm\" (UniqueName: \"kubernetes.io/projected/671d85cf-cc63-4984-9d8e-e0613b699227-kube-api-access-jddzm\") pod \"crc-debug-pjhbf\" (UID: \"671d85cf-cc63-4984-9d8e-e0613b699227\") " pod="openshift-must-gather-fspj5/crc-debug-pjhbf" Nov 21 20:37:12 crc kubenswrapper[4701]: I1121 20:37:12.811011 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/671d85cf-cc63-4984-9d8e-e0613b699227-host\") pod \"crc-debug-pjhbf\" (UID: \"671d85cf-cc63-4984-9d8e-e0613b699227\") " pod="openshift-must-gather-fspj5/crc-debug-pjhbf" Nov 21 20:37:12 crc kubenswrapper[4701]: I1121 20:37:12.811266 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jddzm\" (UniqueName: \"kubernetes.io/projected/671d85cf-cc63-4984-9d8e-e0613b699227-kube-api-access-jddzm\") pod \"crc-debug-pjhbf\" (UID: \"671d85cf-cc63-4984-9d8e-e0613b699227\") " pod="openshift-must-gather-fspj5/crc-debug-pjhbf" Nov 21 20:37:12 crc kubenswrapper[4701]: I1121 20:37:12.811264 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/671d85cf-cc63-4984-9d8e-e0613b699227-host\") pod \"crc-debug-pjhbf\" (UID: \"671d85cf-cc63-4984-9d8e-e0613b699227\") " pod="openshift-must-gather-fspj5/crc-debug-pjhbf" Nov 21 20:37:12 crc kubenswrapper[4701]: I1121 20:37:12.833738 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jddzm\" (UniqueName: \"kubernetes.io/projected/671d85cf-cc63-4984-9d8e-e0613b699227-kube-api-access-jddzm\") pod \"crc-debug-pjhbf\" (UID: \"671d85cf-cc63-4984-9d8e-e0613b699227\") " pod="openshift-must-gather-fspj5/crc-debug-pjhbf" Nov 21 20:37:12 crc kubenswrapper[4701]: I1121 20:37:12.911444 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-fspj5/crc-debug-pjhbf" Nov 21 20:37:13 crc kubenswrapper[4701]: I1121 20:37:13.843716 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-fspj5/crc-debug-pjhbf" event={"ID":"671d85cf-cc63-4984-9d8e-e0613b699227","Type":"ContainerStarted","Data":"8d5b2b605b9bada2aa8567a2cb6e898901f22119bb8623db2394d056c4fa25af"} Nov 21 20:37:14 crc kubenswrapper[4701]: E1121 20:37:14.551905 4701 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.12:53388->38.102.83.12:39339: write tcp 38.102.83.12:53388->38.102.83.12:39339: write: connection reset by peer Nov 21 20:37:18 crc kubenswrapper[4701]: I1121 20:37:18.613326 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 20:37:18 crc kubenswrapper[4701]: I1121 20:37:18.613960 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 20:37:23 crc kubenswrapper[4701]: I1121 20:37:23.966875 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-fspj5/crc-debug-pjhbf" event={"ID":"671d85cf-cc63-4984-9d8e-e0613b699227","Type":"ContainerStarted","Data":"76f6f8205e8985f8472522d4f50867938a99271c700edec90145423a43b9af62"} Nov 21 20:37:23 crc kubenswrapper[4701]: I1121 20:37:23.983814 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-fspj5/crc-debug-pjhbf" podStartSLOduration=1.421799968 podStartE2EDuration="11.983793313s" podCreationTimestamp="2025-11-21 20:37:12 +0000 UTC" firstStartedPulling="2025-11-21 20:37:12.979936256 +0000 UTC m=+5723.765076283" lastFinishedPulling="2025-11-21 20:37:23.541929581 +0000 UTC m=+5734.327069628" observedRunningTime="2025-11-21 20:37:23.98219056 +0000 UTC m=+5734.767330587" watchObservedRunningTime="2025-11-21 20:37:23.983793313 +0000 UTC m=+5734.768933340" Nov 21 20:37:48 crc kubenswrapper[4701]: I1121 20:37:48.613308 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 20:37:48 crc kubenswrapper[4701]: I1121 20:37:48.613875 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 20:38:13 crc kubenswrapper[4701]: I1121 20:38:13.527662 4701 generic.go:334] "Generic (PLEG): container finished" podID="671d85cf-cc63-4984-9d8e-e0613b699227" containerID="76f6f8205e8985f8472522d4f50867938a99271c700edec90145423a43b9af62" exitCode=0 Nov 21 20:38:13 crc kubenswrapper[4701]: I1121 20:38:13.527867 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-fspj5/crc-debug-pjhbf" 
event={"ID":"671d85cf-cc63-4984-9d8e-e0613b699227","Type":"ContainerDied","Data":"76f6f8205e8985f8472522d4f50867938a99271c700edec90145423a43b9af62"} Nov 21 20:38:14 crc kubenswrapper[4701]: I1121 20:38:14.667744 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-fspj5/crc-debug-pjhbf" Nov 21 20:38:14 crc kubenswrapper[4701]: I1121 20:38:14.714562 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-fspj5/crc-debug-pjhbf"] Nov 21 20:38:14 crc kubenswrapper[4701]: I1121 20:38:14.728359 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-fspj5/crc-debug-pjhbf"] Nov 21 20:38:14 crc kubenswrapper[4701]: I1121 20:38:14.819450 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/671d85cf-cc63-4984-9d8e-e0613b699227-host\") pod \"671d85cf-cc63-4984-9d8e-e0613b699227\" (UID: \"671d85cf-cc63-4984-9d8e-e0613b699227\") " Nov 21 20:38:14 crc kubenswrapper[4701]: I1121 20:38:14.819591 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jddzm\" (UniqueName: \"kubernetes.io/projected/671d85cf-cc63-4984-9d8e-e0613b699227-kube-api-access-jddzm\") pod \"671d85cf-cc63-4984-9d8e-e0613b699227\" (UID: \"671d85cf-cc63-4984-9d8e-e0613b699227\") " Nov 21 20:38:14 crc kubenswrapper[4701]: I1121 20:38:14.819724 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/671d85cf-cc63-4984-9d8e-e0613b699227-host" (OuterVolumeSpecName: "host") pod "671d85cf-cc63-4984-9d8e-e0613b699227" (UID: "671d85cf-cc63-4984-9d8e-e0613b699227"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 20:38:14 crc kubenswrapper[4701]: I1121 20:38:14.820337 4701 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/671d85cf-cc63-4984-9d8e-e0613b699227-host\") on node \"crc\" DevicePath \"\"" Nov 21 20:38:15 crc kubenswrapper[4701]: I1121 20:38:15.307646 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/671d85cf-cc63-4984-9d8e-e0613b699227-kube-api-access-jddzm" (OuterVolumeSpecName: "kube-api-access-jddzm") pod "671d85cf-cc63-4984-9d8e-e0613b699227" (UID: "671d85cf-cc63-4984-9d8e-e0613b699227"). InnerVolumeSpecName "kube-api-access-jddzm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 20:38:15 crc kubenswrapper[4701]: I1121 20:38:15.332580 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jddzm\" (UniqueName: \"kubernetes.io/projected/671d85cf-cc63-4984-9d8e-e0613b699227-kube-api-access-jddzm\") on node \"crc\" DevicePath \"\"" Nov 21 20:38:15 crc kubenswrapper[4701]: I1121 20:38:15.551469 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8d5b2b605b9bada2aa8567a2cb6e898901f22119bb8623db2394d056c4fa25af" Nov 21 20:38:15 crc kubenswrapper[4701]: I1121 20:38:15.551520 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-fspj5/crc-debug-pjhbf" Nov 21 20:38:15 crc kubenswrapper[4701]: I1121 20:38:15.985120 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="671d85cf-cc63-4984-9d8e-e0613b699227" path="/var/lib/kubelet/pods/671d85cf-cc63-4984-9d8e-e0613b699227/volumes" Nov 21 20:38:16 crc kubenswrapper[4701]: I1121 20:38:16.007114 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-fspj5/crc-debug-lrbmb"] Nov 21 20:38:16 crc kubenswrapper[4701]: E1121 20:38:16.007716 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="671d85cf-cc63-4984-9d8e-e0613b699227" containerName="container-00" Nov 21 20:38:16 crc kubenswrapper[4701]: I1121 20:38:16.007753 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="671d85cf-cc63-4984-9d8e-e0613b699227" containerName="container-00" Nov 21 20:38:16 crc kubenswrapper[4701]: I1121 20:38:16.008093 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="671d85cf-cc63-4984-9d8e-e0613b699227" containerName="container-00" Nov 21 20:38:16 crc kubenswrapper[4701]: I1121 20:38:16.008993 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-fspj5/crc-debug-lrbmb" Nov 21 20:38:16 crc kubenswrapper[4701]: I1121 20:38:16.159252 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2fee2440-acaa-475b-981e-e212c9735739-host\") pod \"crc-debug-lrbmb\" (UID: \"2fee2440-acaa-475b-981e-e212c9735739\") " pod="openshift-must-gather-fspj5/crc-debug-lrbmb" Nov 21 20:38:16 crc kubenswrapper[4701]: I1121 20:38:16.160140 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j82xp\" (UniqueName: \"kubernetes.io/projected/2fee2440-acaa-475b-981e-e212c9735739-kube-api-access-j82xp\") pod \"crc-debug-lrbmb\" (UID: \"2fee2440-acaa-475b-981e-e212c9735739\") " pod="openshift-must-gather-fspj5/crc-debug-lrbmb" Nov 21 20:38:16 crc kubenswrapper[4701]: I1121 20:38:16.262270 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2fee2440-acaa-475b-981e-e212c9735739-host\") pod \"crc-debug-lrbmb\" (UID: \"2fee2440-acaa-475b-981e-e212c9735739\") " pod="openshift-must-gather-fspj5/crc-debug-lrbmb" Nov 21 20:38:16 crc kubenswrapper[4701]: I1121 20:38:16.262346 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j82xp\" (UniqueName: \"kubernetes.io/projected/2fee2440-acaa-475b-981e-e212c9735739-kube-api-access-j82xp\") pod \"crc-debug-lrbmb\" (UID: \"2fee2440-acaa-475b-981e-e212c9735739\") " pod="openshift-must-gather-fspj5/crc-debug-lrbmb" Nov 21 20:38:16 crc kubenswrapper[4701]: I1121 20:38:16.262487 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2fee2440-acaa-475b-981e-e212c9735739-host\") pod \"crc-debug-lrbmb\" (UID: \"2fee2440-acaa-475b-981e-e212c9735739\") " pod="openshift-must-gather-fspj5/crc-debug-lrbmb" Nov 21 20:38:16 crc kubenswrapper[4701]: I1121 20:38:16.293937 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j82xp\" (UniqueName: \"kubernetes.io/projected/2fee2440-acaa-475b-981e-e212c9735739-kube-api-access-j82xp\") pod \"crc-debug-lrbmb\" (UID: \"2fee2440-acaa-475b-981e-e212c9735739\") " 
pod="openshift-must-gather-fspj5/crc-debug-lrbmb" Nov 21 20:38:16 crc kubenswrapper[4701]: I1121 20:38:16.330253 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-fspj5/crc-debug-lrbmb" Nov 21 20:38:16 crc kubenswrapper[4701]: I1121 20:38:16.568748 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-fspj5/crc-debug-lrbmb" event={"ID":"2fee2440-acaa-475b-981e-e212c9735739","Type":"ContainerStarted","Data":"30b4c70122af361a0ebd649620c21ca85ed24914a2f9a1b10cbe96de4895efea"} Nov 21 20:38:17 crc kubenswrapper[4701]: I1121 20:38:17.581366 4701 generic.go:334] "Generic (PLEG): container finished" podID="2fee2440-acaa-475b-981e-e212c9735739" containerID="64dd3ffa7f62b8fb727766ab3216e32663922ba79df50f7170cf75c8222b0604" exitCode=0 Nov 21 20:38:17 crc kubenswrapper[4701]: I1121 20:38:17.581480 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-fspj5/crc-debug-lrbmb" event={"ID":"2fee2440-acaa-475b-981e-e212c9735739","Type":"ContainerDied","Data":"64dd3ffa7f62b8fb727766ab3216e32663922ba79df50f7170cf75c8222b0604"} Nov 21 20:38:18 crc kubenswrapper[4701]: I1121 20:38:18.618322 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 20:38:18 crc kubenswrapper[4701]: I1121 20:38:18.619156 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 20:38:18 crc kubenswrapper[4701]: I1121 20:38:18.619362 4701 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" Nov 21 20:38:18 crc kubenswrapper[4701]: I1121 20:38:18.622185 4701 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a8ef73b4a01861450b05d5f0ee923d59212be6c48e5ab1447c10cf0a3199c233"} pod="openshift-machine-config-operator/machine-config-daemon-tbszf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 20:38:18 crc kubenswrapper[4701]: I1121 20:38:18.622335 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" containerID="cri-o://a8ef73b4a01861450b05d5f0ee923d59212be6c48e5ab1447c10cf0a3199c233" gracePeriod=600 Nov 21 20:38:18 crc kubenswrapper[4701]: I1121 20:38:18.719979 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-fspj5/crc-debug-lrbmb" Nov 21 20:38:18 crc kubenswrapper[4701]: E1121 20:38:18.767357 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:38:18 crc kubenswrapper[4701]: I1121 20:38:18.821591 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j82xp\" (UniqueName: \"kubernetes.io/projected/2fee2440-acaa-475b-981e-e212c9735739-kube-api-access-j82xp\") pod \"2fee2440-acaa-475b-981e-e212c9735739\" (UID: \"2fee2440-acaa-475b-981e-e212c9735739\") " Nov 21 20:38:18 crc kubenswrapper[4701]: I1121 20:38:18.821813 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2fee2440-acaa-475b-981e-e212c9735739-host\") pod \"2fee2440-acaa-475b-981e-e212c9735739\" (UID: \"2fee2440-acaa-475b-981e-e212c9735739\") " Nov 21 20:38:18 crc kubenswrapper[4701]: I1121 20:38:18.822059 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2fee2440-acaa-475b-981e-e212c9735739-host" (OuterVolumeSpecName: "host") pod "2fee2440-acaa-475b-981e-e212c9735739" (UID: "2fee2440-acaa-475b-981e-e212c9735739"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 20:38:18 crc kubenswrapper[4701]: I1121 20:38:18.827962 4701 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2fee2440-acaa-475b-981e-e212c9735739-host\") on node \"crc\" DevicePath \"\"" Nov 21 20:38:18 crc kubenswrapper[4701]: I1121 20:38:18.829339 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2fee2440-acaa-475b-981e-e212c9735739-kube-api-access-j82xp" (OuterVolumeSpecName: "kube-api-access-j82xp") pod "2fee2440-acaa-475b-981e-e212c9735739" (UID: "2fee2440-acaa-475b-981e-e212c9735739"). InnerVolumeSpecName "kube-api-access-j82xp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 20:38:18 crc kubenswrapper[4701]: I1121 20:38:18.930501 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j82xp\" (UniqueName: \"kubernetes.io/projected/2fee2440-acaa-475b-981e-e212c9735739-kube-api-access-j82xp\") on node \"crc\" DevicePath \"\"" Nov 21 20:38:19 crc kubenswrapper[4701]: I1121 20:38:19.601634 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-fspj5/crc-debug-lrbmb" event={"ID":"2fee2440-acaa-475b-981e-e212c9735739","Type":"ContainerDied","Data":"30b4c70122af361a0ebd649620c21ca85ed24914a2f9a1b10cbe96de4895efea"} Nov 21 20:38:19 crc kubenswrapper[4701]: I1121 20:38:19.601942 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="30b4c70122af361a0ebd649620c21ca85ed24914a2f9a1b10cbe96de4895efea" Nov 21 20:38:19 crc kubenswrapper[4701]: I1121 20:38:19.601725 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-fspj5/crc-debug-lrbmb" Nov 21 20:38:19 crc kubenswrapper[4701]: I1121 20:38:19.606308 4701 generic.go:334] "Generic (PLEG): container finished" podID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerID="a8ef73b4a01861450b05d5f0ee923d59212be6c48e5ab1447c10cf0a3199c233" exitCode=0 Nov 21 20:38:19 crc kubenswrapper[4701]: I1121 20:38:19.606368 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerDied","Data":"a8ef73b4a01861450b05d5f0ee923d59212be6c48e5ab1447c10cf0a3199c233"} Nov 21 20:38:19 crc kubenswrapper[4701]: I1121 20:38:19.606414 4701 scope.go:117] "RemoveContainer" containerID="d5dc02419272f9e53951b6370a00d7483b73cabdde05d29b33488b3f2b2da447" Nov 21 20:38:19 crc kubenswrapper[4701]: I1121 20:38:19.607234 4701 scope.go:117] "RemoveContainer" containerID="a8ef73b4a01861450b05d5f0ee923d59212be6c48e5ab1447c10cf0a3199c233" Nov 21 20:38:19 crc kubenswrapper[4701]: E1121 20:38:19.607532 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:38:19 crc kubenswrapper[4701]: E1121 20:38:19.730664 4701 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2fee2440_acaa_475b_981e_e212c9735739.slice/crio-30b4c70122af361a0ebd649620c21ca85ed24914a2f9a1b10cbe96de4895efea\": RecentStats: unable to find data in memory cache]" Nov 21 20:38:19 crc kubenswrapper[4701]: I1121 20:38:19.860835 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-fspj5/crc-debug-lrbmb"] Nov 21 20:38:19 crc kubenswrapper[4701]: I1121 20:38:19.879491 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-fspj5/crc-debug-lrbmb"] Nov 21 20:38:19 crc kubenswrapper[4701]: I1121 20:38:19.963637 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2fee2440-acaa-475b-981e-e212c9735739" path="/var/lib/kubelet/pods/2fee2440-acaa-475b-981e-e212c9735739/volumes" Nov 21 20:38:21 crc kubenswrapper[4701]: I1121 20:38:21.118320 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-fspj5/crc-debug-5244s"] Nov 21 20:38:21 crc kubenswrapper[4701]: E1121 20:38:21.119146 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fee2440-acaa-475b-981e-e212c9735739" containerName="container-00" Nov 21 20:38:21 crc kubenswrapper[4701]: I1121 20:38:21.119159 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fee2440-acaa-475b-981e-e212c9735739" containerName="container-00" Nov 21 20:38:21 crc kubenswrapper[4701]: I1121 20:38:21.119418 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fee2440-acaa-475b-981e-e212c9735739" containerName="container-00" Nov 21 20:38:21 crc kubenswrapper[4701]: I1121 20:38:21.120233 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-fspj5/crc-debug-5244s" Nov 21 20:38:21 crc kubenswrapper[4701]: I1121 20:38:21.291321 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kp7bl\" (UniqueName: \"kubernetes.io/projected/4c7e42b4-c6d1-431c-9b7c-0533176d50db-kube-api-access-kp7bl\") pod \"crc-debug-5244s\" (UID: \"4c7e42b4-c6d1-431c-9b7c-0533176d50db\") " pod="openshift-must-gather-fspj5/crc-debug-5244s" Nov 21 20:38:21 crc kubenswrapper[4701]: I1121 20:38:21.291371 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4c7e42b4-c6d1-431c-9b7c-0533176d50db-host\") pod \"crc-debug-5244s\" (UID: \"4c7e42b4-c6d1-431c-9b7c-0533176d50db\") " pod="openshift-must-gather-fspj5/crc-debug-5244s" Nov 21 20:38:21 crc kubenswrapper[4701]: I1121 20:38:21.394287 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kp7bl\" (UniqueName: \"kubernetes.io/projected/4c7e42b4-c6d1-431c-9b7c-0533176d50db-kube-api-access-kp7bl\") pod \"crc-debug-5244s\" (UID: \"4c7e42b4-c6d1-431c-9b7c-0533176d50db\") " pod="openshift-must-gather-fspj5/crc-debug-5244s" Nov 21 20:38:21 crc kubenswrapper[4701]: I1121 20:38:21.394338 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4c7e42b4-c6d1-431c-9b7c-0533176d50db-host\") pod \"crc-debug-5244s\" (UID: \"4c7e42b4-c6d1-431c-9b7c-0533176d50db\") " pod="openshift-must-gather-fspj5/crc-debug-5244s" Nov 21 20:38:21 crc kubenswrapper[4701]: I1121 20:38:21.394441 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4c7e42b4-c6d1-431c-9b7c-0533176d50db-host\") pod \"crc-debug-5244s\" (UID: \"4c7e42b4-c6d1-431c-9b7c-0533176d50db\") " pod="openshift-must-gather-fspj5/crc-debug-5244s" Nov 21 20:38:21 crc kubenswrapper[4701]: I1121 20:38:21.430395 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kp7bl\" (UniqueName: \"kubernetes.io/projected/4c7e42b4-c6d1-431c-9b7c-0533176d50db-kube-api-access-kp7bl\") pod \"crc-debug-5244s\" (UID: \"4c7e42b4-c6d1-431c-9b7c-0533176d50db\") " pod="openshift-must-gather-fspj5/crc-debug-5244s" Nov 21 20:38:21 crc kubenswrapper[4701]: I1121 20:38:21.441908 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-fspj5/crc-debug-5244s" Nov 21 20:38:21 crc kubenswrapper[4701]: W1121 20:38:21.488241 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4c7e42b4_c6d1_431c_9b7c_0533176d50db.slice/crio-6569f60c065853467d33525617a0a071b3520845631bafefcd429629bba9fc56 WatchSource:0}: Error finding container 6569f60c065853467d33525617a0a071b3520845631bafefcd429629bba9fc56: Status 404 returned error can't find the container with id 6569f60c065853467d33525617a0a071b3520845631bafefcd429629bba9fc56 Nov 21 20:38:21 crc kubenswrapper[4701]: I1121 20:38:21.632844 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-fspj5/crc-debug-5244s" event={"ID":"4c7e42b4-c6d1-431c-9b7c-0533176d50db","Type":"ContainerStarted","Data":"6569f60c065853467d33525617a0a071b3520845631bafefcd429629bba9fc56"} Nov 21 20:38:22 crc kubenswrapper[4701]: I1121 20:38:22.648269 4701 generic.go:334] "Generic (PLEG): container finished" podID="4c7e42b4-c6d1-431c-9b7c-0533176d50db" containerID="bd87c3067a43e9067a73746e860f47a1da10079de15366c49d8be84b32767481" exitCode=0 Nov 21 20:38:22 crc kubenswrapper[4701]: I1121 20:38:22.648651 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-fspj5/crc-debug-5244s" event={"ID":"4c7e42b4-c6d1-431c-9b7c-0533176d50db","Type":"ContainerDied","Data":"bd87c3067a43e9067a73746e860f47a1da10079de15366c49d8be84b32767481"} Nov 21 20:38:22 crc kubenswrapper[4701]: I1121 20:38:22.739821 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-fspj5/crc-debug-5244s"] Nov 21 20:38:22 crc kubenswrapper[4701]: I1121 20:38:22.749684 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-fspj5/crc-debug-5244s"] Nov 21 20:38:23 crc kubenswrapper[4701]: I1121 20:38:23.804781 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-fspj5/crc-debug-5244s" Nov 21 20:38:23 crc kubenswrapper[4701]: I1121 20:38:23.954145 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kp7bl\" (UniqueName: \"kubernetes.io/projected/4c7e42b4-c6d1-431c-9b7c-0533176d50db-kube-api-access-kp7bl\") pod \"4c7e42b4-c6d1-431c-9b7c-0533176d50db\" (UID: \"4c7e42b4-c6d1-431c-9b7c-0533176d50db\") " Nov 21 20:38:23 crc kubenswrapper[4701]: I1121 20:38:23.954448 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4c7e42b4-c6d1-431c-9b7c-0533176d50db-host\") pod \"4c7e42b4-c6d1-431c-9b7c-0533176d50db\" (UID: \"4c7e42b4-c6d1-431c-9b7c-0533176d50db\") " Nov 21 20:38:23 crc kubenswrapper[4701]: I1121 20:38:23.954566 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4c7e42b4-c6d1-431c-9b7c-0533176d50db-host" (OuterVolumeSpecName: "host") pod "4c7e42b4-c6d1-431c-9b7c-0533176d50db" (UID: "4c7e42b4-c6d1-431c-9b7c-0533176d50db"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 20:38:23 crc kubenswrapper[4701]: I1121 20:38:23.955388 4701 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4c7e42b4-c6d1-431c-9b7c-0533176d50db-host\") on node \"crc\" DevicePath \"\"" Nov 21 20:38:23 crc kubenswrapper[4701]: I1121 20:38:23.964667 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c7e42b4-c6d1-431c-9b7c-0533176d50db-kube-api-access-kp7bl" (OuterVolumeSpecName: "kube-api-access-kp7bl") pod "4c7e42b4-c6d1-431c-9b7c-0533176d50db" (UID: "4c7e42b4-c6d1-431c-9b7c-0533176d50db"). InnerVolumeSpecName "kube-api-access-kp7bl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 20:38:23 crc kubenswrapper[4701]: I1121 20:38:23.969679 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c7e42b4-c6d1-431c-9b7c-0533176d50db" path="/var/lib/kubelet/pods/4c7e42b4-c6d1-431c-9b7c-0533176d50db/volumes" Nov 21 20:38:24 crc kubenswrapper[4701]: I1121 20:38:24.057451 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kp7bl\" (UniqueName: \"kubernetes.io/projected/4c7e42b4-c6d1-431c-9b7c-0533176d50db-kube-api-access-kp7bl\") on node \"crc\" DevicePath \"\"" Nov 21 20:38:24 crc kubenswrapper[4701]: I1121 20:38:24.673424 4701 scope.go:117] "RemoveContainer" containerID="bd87c3067a43e9067a73746e860f47a1da10079de15366c49d8be84b32767481" Nov 21 20:38:24 crc kubenswrapper[4701]: I1121 20:38:24.673510 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-fspj5/crc-debug-5244s" Nov 21 20:38:30 crc kubenswrapper[4701]: I1121 20:38:30.952191 4701 scope.go:117] "RemoveContainer" containerID="a8ef73b4a01861450b05d5f0ee923d59212be6c48e5ab1447c10cf0a3199c233" Nov 21 20:38:30 crc kubenswrapper[4701]: E1121 20:38:30.953259 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:38:43 crc kubenswrapper[4701]: I1121 20:38:43.951157 4701 scope.go:117] "RemoveContainer" containerID="a8ef73b4a01861450b05d5f0ee923d59212be6c48e5ab1447c10cf0a3199c233" Nov 21 20:38:43 crc kubenswrapper[4701]: E1121 20:38:43.952458 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:38:53 crc kubenswrapper[4701]: I1121 20:38:53.371192 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-796cb85bf8-h88pn_59b306d9-cacf-4e38-b19f-60f8ebe026a7/barbican-api/0.log" Nov 21 20:38:53 crc kubenswrapper[4701]: I1121 20:38:53.489585 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-796cb85bf8-h88pn_59b306d9-cacf-4e38-b19f-60f8ebe026a7/barbican-api-log/0.log" Nov 21 20:38:53 crc kubenswrapper[4701]: I1121 20:38:53.590392 4701 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-58799d9dcd-lkd7s_c8d0e0c3-70bf-4ce0-94ea-54f03b3c42e3/barbican-keystone-listener/0.log" Nov 21 20:38:53 crc kubenswrapper[4701]: I1121 20:38:53.708091 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-58799d9dcd-lkd7s_c8d0e0c3-70bf-4ce0-94ea-54f03b3c42e3/barbican-keystone-listener-log/0.log" Nov 21 20:38:53 crc kubenswrapper[4701]: I1121 20:38:53.790973 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-6ff5fffc67-6nrzn_a20c7ac2-0856-4c64-8910-3c053184c47b/barbican-worker/0.log" Nov 21 20:38:53 crc kubenswrapper[4701]: I1121 20:38:53.896211 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-6ff5fffc67-6nrzn_a20c7ac2-0856-4c64-8910-3c053184c47b/barbican-worker-log/0.log" Nov 21 20:38:54 crc kubenswrapper[4701]: I1121 20:38:54.105655 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-cbvnd_d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 20:38:54 crc kubenswrapper[4701]: I1121 20:38:54.205129 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_7446e023-4eae-4738-ab4a-4ddf024cd980/ceilometer-central-agent/0.log" Nov 21 20:38:54 crc kubenswrapper[4701]: I1121 20:38:54.320748 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_7446e023-4eae-4738-ab4a-4ddf024cd980/ceilometer-notification-agent/0.log" Nov 21 20:38:54 crc kubenswrapper[4701]: I1121 20:38:54.346284 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_7446e023-4eae-4738-ab4a-4ddf024cd980/proxy-httpd/0.log" Nov 21 20:38:54 crc kubenswrapper[4701]: I1121 20:38:54.372874 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_7446e023-4eae-4738-ab4a-4ddf024cd980/sg-core/0.log" Nov 21 20:38:54 crc kubenswrapper[4701]: I1121 20:38:54.594995 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f/cinder-api-log/0.log" Nov 21 20:38:54 crc kubenswrapper[4701]: I1121 20:38:54.914497 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_9c177319-2b64-45dd-a3df-47ccaca79da4/probe/0.log" Nov 21 20:38:54 crc kubenswrapper[4701]: I1121 20:38:54.954006 4701 scope.go:117] "RemoveContainer" containerID="a8ef73b4a01861450b05d5f0ee923d59212be6c48e5ab1447c10cf0a3199c233" Nov 21 20:38:54 crc kubenswrapper[4701]: E1121 20:38:54.954328 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:38:55 crc kubenswrapper[4701]: I1121 20:38:55.198382 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_9c177319-2b64-45dd-a3df-47ccaca79da4/cinder-backup/0.log" Nov 21 20:38:55 crc kubenswrapper[4701]: I1121 20:38:55.235350 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f/cinder-api/0.log" Nov 21 20:38:55 crc 
kubenswrapper[4701]: I1121 20:38:55.733767 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_4b72befc-936a-4833-8e7a-f765c655a300/cinder-scheduler/0.log" Nov 21 20:38:55 crc kubenswrapper[4701]: I1121 20:38:55.805292 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_4b72befc-936a-4833-8e7a-f765c655a300/probe/0.log" Nov 21 20:38:55 crc kubenswrapper[4701]: I1121 20:38:55.973647 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-nfs-0_fbd75a47-d6fb-407b-bf7a-9750b745b820/cinder-volume/0.log" Nov 21 20:38:56 crc kubenswrapper[4701]: I1121 20:38:56.060908 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-nfs-0_fbd75a47-d6fb-407b-bf7a-9750b745b820/probe/0.log" Nov 21 20:38:56 crc kubenswrapper[4701]: I1121 20:38:56.265782 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-nfs-2-0_c2b29617-ccfa-4b0c-be12-52e9e7e06c33/cinder-volume/0.log" Nov 21 20:38:56 crc kubenswrapper[4701]: I1121 20:38:56.337208 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-t25h9_ae68b914-c2d3-4df9-bd3c-563524bb9ded/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 20:38:56 crc kubenswrapper[4701]: I1121 20:38:56.345603 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-nfs-2-0_c2b29617-ccfa-4b0c-be12-52e9e7e06c33/probe/0.log" Nov 21 20:38:56 crc kubenswrapper[4701]: I1121 20:38:56.601353 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-lcdx6_e275d37d-b55f-433f-b4be-cfba6b7b158e/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 20:38:56 crc kubenswrapper[4701]: I1121 20:38:56.618769 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5894fd8d75-7xcgx_7a22d8c3-ddf4-4901-b1c6-39a9099d1de6/init/0.log" Nov 21 20:38:56 crc kubenswrapper[4701]: I1121 20:38:56.827407 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5894fd8d75-7xcgx_7a22d8c3-ddf4-4901-b1c6-39a9099d1de6/init/0.log" Nov 21 20:38:56 crc kubenswrapper[4701]: I1121 20:38:56.897252 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-vj8rp_775c15c9-3c73-4e78-ad8e-b02163afc9f2/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 20:38:56 crc kubenswrapper[4701]: I1121 20:38:56.998925 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5894fd8d75-7xcgx_7a22d8c3-ddf4-4901-b1c6-39a9099d1de6/dnsmasq-dns/0.log" Nov 21 20:38:57 crc kubenswrapper[4701]: I1121 20:38:57.120985 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_b405de0f-6523-4d69-b8a7-a73528f0df37/glance-httpd/0.log" Nov 21 20:38:57 crc kubenswrapper[4701]: I1121 20:38:57.136837 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_b405de0f-6523-4d69-b8a7-a73528f0df37/glance-log/0.log" Nov 21 20:38:57 crc kubenswrapper[4701]: I1121 20:38:57.281945 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_50767206-6e50-48ab-ab5f-2eee90151470/glance-log/0.log" Nov 21 20:38:57 crc kubenswrapper[4701]: I1121 20:38:57.349823 4701 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_glance-default-internal-api-0_50767206-6e50-48ab-ab5f-2eee90151470/glance-httpd/0.log" Nov 21 20:38:58 crc kubenswrapper[4701]: I1121 20:38:58.225927 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b_b4e65662-463f-4f48-b668-1ad55aaeb9fe/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 20:38:58 crc kubenswrapper[4701]: I1121 20:38:58.324263 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-6c68b8ff68-tfcgs_7d8b1846-dcd5-49b4-8eb2-74b0462538e1/horizon/0.log" Nov 21 20:38:58 crc kubenswrapper[4701]: I1121 20:38:58.565619 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-wtbcx_40c71638-2add-4f3c-acc9-cc971cad107e/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 20:38:58 crc kubenswrapper[4701]: I1121 20:38:58.777489 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29395921-cv2wk_4039a132-b77c-4449-baa0-c79e6940472f/keystone-cron/0.log" Nov 21 20:38:58 crc kubenswrapper[4701]: I1121 20:38:58.987459 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_2ebd86aa-d8dc-4f48-b9a4-c6445bdb71ad/kube-state-metrics/0.log" Nov 21 20:38:59 crc kubenswrapper[4701]: I1121 20:38:59.168748 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-6c68b8ff68-tfcgs_7d8b1846-dcd5-49b4-8eb2-74b0462538e1/horizon-log/0.log" Nov 21 20:38:59 crc kubenswrapper[4701]: I1121 20:38:59.263906 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-65d76b5c54-c9d89_dee81498-90dd-46c0-949f-c3de3b9bfbd3/keystone-api/0.log" Nov 21 20:38:59 crc kubenswrapper[4701]: I1121 20:38:59.285573 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-fls89_17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 20:38:59 crc kubenswrapper[4701]: I1121 20:38:59.690609 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5_44f05c88-6707-4ca9-a248-d5abc8ae5850/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 20:38:59 crc kubenswrapper[4701]: I1121 20:38:59.859292 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5b76b98545-tv4h2_1fc0c9dc-fc55-43fb-a2bb-727c01863fb5/neutron-httpd/0.log" Nov 21 20:38:59 crc kubenswrapper[4701]: I1121 20:38:59.961215 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5b76b98545-tv4h2_1fc0c9dc-fc55-43fb-a2bb-727c01863fb5/neutron-api/0.log" Nov 21 20:39:00 crc kubenswrapper[4701]: I1121 20:39:00.904800 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_6ff3d334-08d1-49a9-8483-48402c600ec2/nova-cell0-conductor-conductor/0.log" Nov 21 20:39:01 crc kubenswrapper[4701]: I1121 20:39:01.211532 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_beda6918-af8f-42ab-8599-44f3dc52229f/nova-cell1-conductor-conductor/0.log" Nov 21 20:39:01 crc kubenswrapper[4701]: I1121 20:39:01.636335 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_43fdb14d-22e9-469a-b6cb-00477daa5ece/nova-cell1-novncproxy-novncproxy/0.log" Nov 21 20:39:01 crc kubenswrapper[4701]: I1121 
20:39:01.864323 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-rhwkh_a50527e7-3b38-471d-a03d-937e88e019f3/nova-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 20:39:02 crc kubenswrapper[4701]: I1121 20:39:02.278475 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_3e2f4186-103f-4356-8b8a-80a07cde4ac4/nova-metadata-log/0.log" Nov 21 20:39:02 crc kubenswrapper[4701]: I1121 20:39:02.356972 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_7dfe7de3-4ade-4a2e-8826-be286e416d33/nova-api-log/0.log" Nov 21 20:39:02 crc kubenswrapper[4701]: I1121 20:39:02.577683 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_7dfe7de3-4ade-4a2e-8826-be286e416d33/nova-api-api/0.log" Nov 21 20:39:02 crc kubenswrapper[4701]: I1121 20:39:02.883331 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_b6432247-ed58-4dce-98d4-4267d0122151/mysql-bootstrap/0.log" Nov 21 20:39:02 crc kubenswrapper[4701]: I1121 20:39:02.909733 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_3df472cf-6795-4ca8-908b-01f824bf4b5e/nova-scheduler-scheduler/0.log" Nov 21 20:39:03 crc kubenswrapper[4701]: I1121 20:39:03.154459 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_b6432247-ed58-4dce-98d4-4267d0122151/mysql-bootstrap/0.log" Nov 21 20:39:03 crc kubenswrapper[4701]: I1121 20:39:03.214002 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_b6432247-ed58-4dce-98d4-4267d0122151/galera/0.log" Nov 21 20:39:03 crc kubenswrapper[4701]: I1121 20:39:03.453314 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_117bcee4-5190-4738-8e03-19f77f4fb428/mysql-bootstrap/0.log" Nov 21 20:39:03 crc kubenswrapper[4701]: I1121 20:39:03.643090 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_117bcee4-5190-4738-8e03-19f77f4fb428/mysql-bootstrap/0.log" Nov 21 20:39:03 crc kubenswrapper[4701]: I1121 20:39:03.751073 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_117bcee4-5190-4738-8e03-19f77f4fb428/galera/0.log" Nov 21 20:39:04 crc kubenswrapper[4701]: I1121 20:39:04.234846 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-49p6k_d6bce0ec-3045-405e-914b-f466321dc7ea/ovn-controller/0.log" Nov 21 20:39:04 crc kubenswrapper[4701]: I1121 20:39:04.238967 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_3cabfe57-5d37-4a59-93e3-aac4836f7d2c/openstackclient/0.log" Nov 21 20:39:04 crc kubenswrapper[4701]: I1121 20:39:04.524670 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-msx5f_81c7a23e-51f0-4360-820b-6f4f7b7daa63/openstack-network-exporter/0.log" Nov 21 20:39:04 crc kubenswrapper[4701]: I1121 20:39:04.813145 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-vqwr8_c1552bca-042c-4d9e-ac6f-8c8f762ac494/ovsdb-server-init/0.log" Nov 21 20:39:05 crc kubenswrapper[4701]: I1121 20:39:05.027493 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_3e2f4186-103f-4356-8b8a-80a07cde4ac4/nova-metadata-metadata/0.log" Nov 21 20:39:05 crc kubenswrapper[4701]: I1121 20:39:05.035585 4701 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-vqwr8_c1552bca-042c-4d9e-ac6f-8c8f762ac494/ovsdb-server-init/0.log" Nov 21 20:39:05 crc kubenswrapper[4701]: I1121 20:39:05.040174 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-vqwr8_c1552bca-042c-4d9e-ac6f-8c8f762ac494/ovsdb-server/0.log" Nov 21 20:39:05 crc kubenswrapper[4701]: I1121 20:39:05.315905 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-rdjpc_640fcd44-4a2e-475b-b296-5f37ac6d55e7/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 20:39:05 crc kubenswrapper[4701]: I1121 20:39:05.437965 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-vqwr8_c1552bca-042c-4d9e-ac6f-8c8f762ac494/ovs-vswitchd/0.log" Nov 21 20:39:05 crc kubenswrapper[4701]: I1121 20:39:05.533235 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_4b75b7e0-a14e-4889-9430-7cbb446d48d9/ovn-northd/0.log" Nov 21 20:39:05 crc kubenswrapper[4701]: I1121 20:39:05.581464 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_4b75b7e0-a14e-4889-9430-7cbb446d48d9/openstack-network-exporter/0.log" Nov 21 20:39:05 crc kubenswrapper[4701]: I1121 20:39:05.772723 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_76796a80-e8f7-43ed-862b-011b964a31f9/openstack-network-exporter/0.log" Nov 21 20:39:05 crc kubenswrapper[4701]: I1121 20:39:05.787813 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_76796a80-e8f7-43ed-862b-011b964a31f9/ovsdbserver-nb/0.log" Nov 21 20:39:05 crc kubenswrapper[4701]: I1121 20:39:05.828713 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_fae8c560-c6a6-453e-8c64-9dca8183e5c0/openstack-network-exporter/0.log" Nov 21 20:39:06 crc kubenswrapper[4701]: I1121 20:39:06.104047 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_fae8c560-c6a6-453e-8c64-9dca8183e5c0/ovsdbserver-sb/0.log" Nov 21 20:39:06 crc kubenswrapper[4701]: I1121 20:39:06.375982 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-78b94b5b48-685pj_c5db43f2-147c-4625-9f3a-9f68cc6afa8c/placement-api/0.log" Nov 21 20:39:06 crc kubenswrapper[4701]: I1121 20:39:06.396245 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_71c6d659-75fd-4221-8b7e-1496221311fe/init-config-reloader/0.log" Nov 21 20:39:06 crc kubenswrapper[4701]: I1121 20:39:06.456103 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-78b94b5b48-685pj_c5db43f2-147c-4625-9f3a-9f68cc6afa8c/placement-log/0.log" Nov 21 20:39:06 crc kubenswrapper[4701]: I1121 20:39:06.615889 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_71c6d659-75fd-4221-8b7e-1496221311fe/init-config-reloader/0.log" Nov 21 20:39:06 crc kubenswrapper[4701]: I1121 20:39:06.685028 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_71c6d659-75fd-4221-8b7e-1496221311fe/prometheus/0.log" Nov 21 20:39:06 crc kubenswrapper[4701]: I1121 20:39:06.687508 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_71c6d659-75fd-4221-8b7e-1496221311fe/config-reloader/0.log" Nov 21 20:39:06 crc kubenswrapper[4701]: 
I1121 20:39:06.716674 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_71c6d659-75fd-4221-8b7e-1496221311fe/thanos-sidecar/0.log" Nov 21 20:39:07 crc kubenswrapper[4701]: I1121 20:39:07.536065 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_c53b35a3-36ed-43a5-a400-4658b9408596/setup-container/0.log" Nov 21 20:39:07 crc kubenswrapper[4701]: I1121 20:39:07.894700 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_c53b35a3-36ed-43a5-a400-4658b9408596/setup-container/0.log" Nov 21 20:39:07 crc kubenswrapper[4701]: I1121 20:39:07.929797 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_c53b35a3-36ed-43a5-a400-4658b9408596/rabbitmq/0.log" Nov 21 20:39:07 crc kubenswrapper[4701]: I1121 20:39:07.998321 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-notifications-server-0_fa567817-ce17-4cb3-9e55-e14902a96420/setup-container/0.log" Nov 21 20:39:08 crc kubenswrapper[4701]: I1121 20:39:08.230420 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-notifications-server-0_fa567817-ce17-4cb3-9e55-e14902a96420/setup-container/0.log" Nov 21 20:39:08 crc kubenswrapper[4701]: I1121 20:39:08.277532 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-notifications-server-0_fa567817-ce17-4cb3-9e55-e14902a96420/rabbitmq/0.log" Nov 21 20:39:08 crc kubenswrapper[4701]: I1121 20:39:08.329979 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_fcd41199-949d-4c9f-9154-f83acb9bb997/setup-container/0.log" Nov 21 20:39:08 crc kubenswrapper[4701]: I1121 20:39:08.546487 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_fcd41199-949d-4c9f-9154-f83acb9bb997/setup-container/0.log" Nov 21 20:39:08 crc kubenswrapper[4701]: I1121 20:39:08.609191 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-kj7pb_bcdfb82f-4b6c-44ca-b282-1f803082c73d/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 20:39:08 crc kubenswrapper[4701]: I1121 20:39:08.655186 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_fcd41199-949d-4c9f-9154-f83acb9bb997/rabbitmq/0.log" Nov 21 20:39:08 crc kubenswrapper[4701]: I1121 20:39:08.924097 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-8vgkh_c3df9720-470f-4076-93ad-cd09d2b8c1d4/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 20:39:08 crc kubenswrapper[4701]: I1121 20:39:08.927502 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-6nz7n_aa6fec42-0fdb-4b30-80b9-7cea4579dd05/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 20:39:08 crc kubenswrapper[4701]: I1121 20:39:08.951896 4701 scope.go:117] "RemoveContainer" containerID="a8ef73b4a01861450b05d5f0ee923d59212be6c48e5ab1447c10cf0a3199c233" Nov 21 20:39:08 crc kubenswrapper[4701]: E1121 20:39:08.952163 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:39:09 crc kubenswrapper[4701]: I1121 20:39:09.715895 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-mxn98_53533f20-cd97-4dfe-a00c-e5f0e6f86403/ssh-known-hosts-edpm-deployment/0.log" Nov 21 20:39:09 crc kubenswrapper[4701]: I1121 20:39:09.746003 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-p5jn2_ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 20:39:10 crc kubenswrapper[4701]: I1121 20:39:10.030989 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-66cbbc6b59-4jhxd_567ed826-1db0-4018-b4ea-8af42596aa3e/proxy-server/0.log" Nov 21 20:39:10 crc kubenswrapper[4701]: I1121 20:39:10.264070 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-qnhkr_6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a/swift-ring-rebalance/0.log" Nov 21 20:39:10 crc kubenswrapper[4701]: I1121 20:39:10.331524 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-66cbbc6b59-4jhxd_567ed826-1db0-4018-b4ea-8af42596aa3e/proxy-httpd/0.log" Nov 21 20:39:10 crc kubenswrapper[4701]: I1121 20:39:10.388148 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_bf8d5d78-fa29-41ff-94e0-6249f7e02e1b/account-auditor/0.log" Nov 21 20:39:10 crc kubenswrapper[4701]: I1121 20:39:10.500723 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_bf8d5d78-fa29-41ff-94e0-6249f7e02e1b/account-reaper/0.log" Nov 21 20:39:10 crc kubenswrapper[4701]: I1121 20:39:10.599936 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_bf8d5d78-fa29-41ff-94e0-6249f7e02e1b/container-auditor/0.log" Nov 21 20:39:10 crc kubenswrapper[4701]: I1121 20:39:10.616538 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_bf8d5d78-fa29-41ff-94e0-6249f7e02e1b/account-server/0.log" Nov 21 20:39:10 crc kubenswrapper[4701]: I1121 20:39:10.687410 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_bf8d5d78-fa29-41ff-94e0-6249f7e02e1b/account-replicator/0.log" Nov 21 20:39:10 crc kubenswrapper[4701]: I1121 20:39:10.805371 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_bf8d5d78-fa29-41ff-94e0-6249f7e02e1b/container-server/0.log" Nov 21 20:39:10 crc kubenswrapper[4701]: I1121 20:39:10.846477 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_bf8d5d78-fa29-41ff-94e0-6249f7e02e1b/container-replicator/0.log" Nov 21 20:39:10 crc kubenswrapper[4701]: I1121 20:39:10.869402 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_bf8d5d78-fa29-41ff-94e0-6249f7e02e1b/container-updater/0.log" Nov 21 20:39:11 crc kubenswrapper[4701]: I1121 20:39:11.019975 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_bf8d5d78-fa29-41ff-94e0-6249f7e02e1b/object-auditor/0.log" Nov 21 20:39:11 crc kubenswrapper[4701]: I1121 20:39:11.038059 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_bf8d5d78-fa29-41ff-94e0-6249f7e02e1b/object-expirer/0.log" Nov 21 20:39:11 crc kubenswrapper[4701]: I1121 20:39:11.128972 4701 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openstack_swift-storage-0_bf8d5d78-fa29-41ff-94e0-6249f7e02e1b/object-server/0.log" Nov 21 20:39:11 crc kubenswrapper[4701]: I1121 20:39:11.180939 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_bf8d5d78-fa29-41ff-94e0-6249f7e02e1b/object-replicator/0.log" Nov 21 20:39:11 crc kubenswrapper[4701]: I1121 20:39:11.285161 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_bf8d5d78-fa29-41ff-94e0-6249f7e02e1b/rsync/0.log" Nov 21 20:39:11 crc kubenswrapper[4701]: I1121 20:39:11.309321 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_bf8d5d78-fa29-41ff-94e0-6249f7e02e1b/object-updater/0.log" Nov 21 20:39:11 crc kubenswrapper[4701]: I1121 20:39:11.391709 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_bf8d5d78-fa29-41ff-94e0-6249f7e02e1b/swift-recon-cron/0.log" Nov 21 20:39:11 crc kubenswrapper[4701]: I1121 20:39:11.646334 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-97f7r_4f066a0d-46d8-4cfd-b188-495f77c256f1/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 20:39:11 crc kubenswrapper[4701]: I1121 20:39:11.659709 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_6dd5f296-841e-4527-88fe-3963fef0e450/tempest-tests-tempest-tests-runner/0.log" Nov 21 20:39:11 crc kubenswrapper[4701]: I1121 20:39:11.944603 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_e641415e-cd44-42d3-b2be-d1b45a79297a/test-operator-logs-container/0.log" Nov 21 20:39:12 crc kubenswrapper[4701]: I1121 20:39:12.081859 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-rztzd_2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 20:39:13 crc kubenswrapper[4701]: I1121 20:39:13.164603 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-applier-0_2bbe25c3-cbc9-45d6-aabe-a9b8e69d044f/watcher-applier/0.log" Nov 21 20:39:13 crc kubenswrapper[4701]: I1121 20:39:13.766004 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-api-0_b909f8f0-603a-420b-8b12-2b15b6c0900e/watcher-api-log/0.log" Nov 21 20:39:17 crc kubenswrapper[4701]: I1121 20:39:17.202526 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-decision-engine-0_87723852-e421-4a28-a9ce-90390eb3b7a8/watcher-decision-engine/0.log" Nov 21 20:39:18 crc kubenswrapper[4701]: I1121 20:39:18.417049 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-api-0_b909f8f0-603a-420b-8b12-2b15b6c0900e/watcher-api/0.log" Nov 21 20:39:23 crc kubenswrapper[4701]: I1121 20:39:23.955720 4701 scope.go:117] "RemoveContainer" containerID="a8ef73b4a01861450b05d5f0ee923d59212be6c48e5ab1447c10cf0a3199c233" Nov 21 20:39:23 crc kubenswrapper[4701]: E1121 20:39:23.956627 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" 
podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:39:25 crc kubenswrapper[4701]: I1121 20:39:25.224343 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_2d891f55-8791-487d-b8f9-b4183da3e720/memcached/0.log" Nov 21 20:39:38 crc kubenswrapper[4701]: I1121 20:39:38.952553 4701 scope.go:117] "RemoveContainer" containerID="a8ef73b4a01861450b05d5f0ee923d59212be6c48e5ab1447c10cf0a3199c233" Nov 21 20:39:38 crc kubenswrapper[4701]: E1121 20:39:38.954022 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:39:49 crc kubenswrapper[4701]: I1121 20:39:49.371126 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98_c35200a2-6f14-4b98-b227-d93f103b9d76/util/0.log" Nov 21 20:39:49 crc kubenswrapper[4701]: I1121 20:39:49.556376 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98_c35200a2-6f14-4b98-b227-d93f103b9d76/util/0.log" Nov 21 20:39:49 crc kubenswrapper[4701]: I1121 20:39:49.598487 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98_c35200a2-6f14-4b98-b227-d93f103b9d76/pull/0.log" Nov 21 20:39:49 crc kubenswrapper[4701]: I1121 20:39:49.608179 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98_c35200a2-6f14-4b98-b227-d93f103b9d76/pull/0.log" Nov 21 20:39:49 crc kubenswrapper[4701]: I1121 20:39:49.777184 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98_c35200a2-6f14-4b98-b227-d93f103b9d76/pull/0.log" Nov 21 20:39:49 crc kubenswrapper[4701]: I1121 20:39:49.789996 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98_c35200a2-6f14-4b98-b227-d93f103b9d76/util/0.log" Nov 21 20:39:49 crc kubenswrapper[4701]: I1121 20:39:49.844785 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98_c35200a2-6f14-4b98-b227-d93f103b9d76/extract/0.log" Nov 21 20:39:49 crc kubenswrapper[4701]: I1121 20:39:49.966721 4701 scope.go:117] "RemoveContainer" containerID="a8ef73b4a01861450b05d5f0ee923d59212be6c48e5ab1447c10cf0a3199c233" Nov 21 20:39:49 crc kubenswrapper[4701]: E1121 20:39:49.967316 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:39:50 crc kubenswrapper[4701]: I1121 20:39:50.308590 4701 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-75fb479bcc-lgvh6_4c5eabdd-f4f8-4180-be28-707592f6d24d/manager/0.log" Nov 21 20:39:50 crc kubenswrapper[4701]: I1121 20:39:50.325909 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-75fb479bcc-lgvh6_4c5eabdd-f4f8-4180-be28-707592f6d24d/kube-rbac-proxy/0.log" Nov 21 20:39:50 crc kubenswrapper[4701]: I1121 20:39:50.365175 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6498cbf48f-2ccc7_566d8e82-b230-492d-a47b-80d2351b169e/kube-rbac-proxy/0.log" Nov 21 20:39:50 crc kubenswrapper[4701]: I1121 20:39:50.568434 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6498cbf48f-2ccc7_566d8e82-b230-492d-a47b-80d2351b169e/manager/0.log" Nov 21 20:39:50 crc kubenswrapper[4701]: I1121 20:39:50.608473 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-767ccfd65f-qmdtp_87969819-3a91-4333-9585-72a2a27fa6c9/kube-rbac-proxy/0.log" Nov 21 20:39:50 crc kubenswrapper[4701]: I1121 20:39:50.611764 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-767ccfd65f-qmdtp_87969819-3a91-4333-9585-72a2a27fa6c9/manager/0.log" Nov 21 20:39:50 crc kubenswrapper[4701]: I1121 20:39:50.835463 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-7969689c84-t6hz7_0c6d96e4-2798-4525-bcec-61ad137140d8/kube-rbac-proxy/0.log" Nov 21 20:39:50 crc kubenswrapper[4701]: I1121 20:39:50.902588 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-7969689c84-t6hz7_0c6d96e4-2798-4525-bcec-61ad137140d8/manager/0.log" Nov 21 20:39:51 crc kubenswrapper[4701]: I1121 20:39:51.043666 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-56f54d6746-c2mhg_ef203e45-f1b1-4a9a-9987-66bb33655a95/kube-rbac-proxy/0.log" Nov 21 20:39:51 crc kubenswrapper[4701]: I1121 20:39:51.097380 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-598f69df5d-kkz2m_a76b2214-2c16-4b55-bf3d-c7bdf1019237/kube-rbac-proxy/0.log" Nov 21 20:39:51 crc kubenswrapper[4701]: I1121 20:39:51.151708 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-56f54d6746-c2mhg_ef203e45-f1b1-4a9a-9987-66bb33655a95/manager/0.log" Nov 21 20:39:51 crc kubenswrapper[4701]: I1121 20:39:51.277871 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-598f69df5d-kkz2m_a76b2214-2c16-4b55-bf3d-c7bdf1019237/manager/0.log" Nov 21 20:39:51 crc kubenswrapper[4701]: I1121 20:39:51.471913 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-6dd8864d7c-vbqvb_b15963ff-1822-4079-8cce-266b05a9ac47/kube-rbac-proxy/0.log" Nov 21 20:39:51 crc kubenswrapper[4701]: I1121 20:39:51.563443 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-6dd8864d7c-vbqvb_b15963ff-1822-4079-8cce-266b05a9ac47/manager/0.log" Nov 21 20:39:51 crc kubenswrapper[4701]: I1121 20:39:51.619176 4701 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-99b499f4-gg7tq_c7b87a42-0af4-4484-845e-f2993960537c/kube-rbac-proxy/0.log" Nov 21 20:39:51 crc kubenswrapper[4701]: I1121 20:39:51.713748 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-99b499f4-gg7tq_c7b87a42-0af4-4484-845e-f2993960537c/manager/0.log" Nov 21 20:39:51 crc kubenswrapper[4701]: I1121 20:39:51.822930 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7454b96578-hz5sk_61444bc1-a24a-4c29-94b8-953ae2dc8621/kube-rbac-proxy/0.log" Nov 21 20:39:51 crc kubenswrapper[4701]: I1121 20:39:51.973545 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7454b96578-hz5sk_61444bc1-a24a-4c29-94b8-953ae2dc8621/manager/0.log" Nov 21 20:39:52 crc kubenswrapper[4701]: I1121 20:39:52.031306 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58f887965d-467lr_bcf3ee80-4bca-445a-84aa-ef30d99b7b9a/kube-rbac-proxy/0.log" Nov 21 20:39:52 crc kubenswrapper[4701]: I1121 20:39:52.112862 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58f887965d-467lr_bcf3ee80-4bca-445a-84aa-ef30d99b7b9a/manager/0.log" Nov 21 20:39:52 crc kubenswrapper[4701]: I1121 20:39:52.288557 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-54b5986bb8-rlq95_1440b54d-d3f5-46a9-b335-27a6d2031d24/kube-rbac-proxy/0.log" Nov 21 20:39:52 crc kubenswrapper[4701]: I1121 20:39:52.346840 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-54b5986bb8-rlq95_1440b54d-d3f5-46a9-b335-27a6d2031d24/manager/0.log" Nov 21 20:39:52 crc kubenswrapper[4701]: I1121 20:39:52.549370 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-78bd47f458-rzvnf_8c7ae04c-6e93-4c37-b1e5-8bbcbe9ffa2d/kube-rbac-proxy/0.log" Nov 21 20:39:52 crc kubenswrapper[4701]: I1121 20:39:52.637540 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-78bd47f458-rzvnf_8c7ae04c-6e93-4c37-b1e5-8bbcbe9ffa2d/manager/0.log" Nov 21 20:39:52 crc kubenswrapper[4701]: I1121 20:39:52.699168 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-cfbb9c588-zrwsd_21028817-64c6-4a7c-8427-8ee3db1dec7b/kube-rbac-proxy/0.log" Nov 21 20:39:52 crc kubenswrapper[4701]: I1121 20:39:52.893371 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-cfbb9c588-zrwsd_21028817-64c6-4a7c-8427-8ee3db1dec7b/manager/0.log" Nov 21 20:39:52 crc kubenswrapper[4701]: I1121 20:39:52.958660 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-54cfbf4c7d-qqmcw_015395c6-297a-4a90-a5fd-49dcdde237af/kube-rbac-proxy/0.log" Nov 21 20:39:53 crc kubenswrapper[4701]: I1121 20:39:53.038569 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-54cfbf4c7d-qqmcw_015395c6-297a-4a90-a5fd-49dcdde237af/manager/0.log" Nov 21 20:39:53 crc kubenswrapper[4701]: I1121 20:39:53.237431 
4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-8c7444f48-c58b4_0439f0bf-0ea9-4553-a53c-74f87b31a6a7/kube-rbac-proxy/0.log" Nov 21 20:39:53 crc kubenswrapper[4701]: I1121 20:39:53.286272 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-8c7444f48-c58b4_0439f0bf-0ea9-4553-a53c-74f87b31a6a7/manager/0.log" Nov 21 20:39:53 crc kubenswrapper[4701]: I1121 20:39:53.384425 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-7467d8c866-fgkj9_743fbd83-3b42-4083-b06f-ae81d6294066/kube-rbac-proxy/0.log" Nov 21 20:39:53 crc kubenswrapper[4701]: I1121 20:39:53.603108 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-6f8fb57dc8-vmnj6_fc60458d-83dd-4a11-b22d-6a8a7f5f01f6/kube-rbac-proxy/0.log" Nov 21 20:39:53 crc kubenswrapper[4701]: I1121 20:39:53.868887 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-hm4sz_42334fc1-ad97-4595-bb9b-4c7f736391e4/registry-server/0.log" Nov 21 20:39:53 crc kubenswrapper[4701]: I1121 20:39:53.918932 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-54fc5f65b7-vmsdw_9a58290e-d37e-4094-8ed8-4ed701c1292c/kube-rbac-proxy/0.log" Nov 21 20:39:54 crc kubenswrapper[4701]: I1121 20:39:54.046161 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-6f8fb57dc8-vmnj6_fc60458d-83dd-4a11-b22d-6a8a7f5f01f6/operator/0.log" Nov 21 20:39:54 crc kubenswrapper[4701]: I1121 20:39:54.148761 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-54fc5f65b7-vmsdw_9a58290e-d37e-4094-8ed8-4ed701c1292c/manager/0.log" Nov 21 20:39:54 crc kubenswrapper[4701]: I1121 20:39:54.179562 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5b797b8dff-96954_e2fc7504-afe1-4197-a366-c765c52366b0/kube-rbac-proxy/0.log" Nov 21 20:39:54 crc kubenswrapper[4701]: I1121 20:39:54.294702 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5b797b8dff-96954_e2fc7504-afe1-4197-a366-c765c52366b0/manager/0.log" Nov 21 20:39:54 crc kubenswrapper[4701]: I1121 20:39:54.490899 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-5f97d8c699-knf78_82428420-1129-4ce6-a969-7d54bb2f0d52/operator/0.log" Nov 21 20:39:54 crc kubenswrapper[4701]: I1121 20:39:54.571413 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d656998f4-pwg6n_565f6d5b-92e9-4fc5-9c4b-9c06b8946754/kube-rbac-proxy/0.log" Nov 21 20:39:54 crc kubenswrapper[4701]: I1121 20:39:54.734301 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-6d4bf84b58-6d2f7_66d77e65-ca72-473d-9697-9168a951b0c9/kube-rbac-proxy/0.log" Nov 21 20:39:54 crc kubenswrapper[4701]: I1121 20:39:54.746721 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-7467d8c866-fgkj9_743fbd83-3b42-4083-b06f-ae81d6294066/manager/0.log" Nov 21 20:39:54 crc 
kubenswrapper[4701]: I1121 20:39:54.776878 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d656998f4-pwg6n_565f6d5b-92e9-4fc5-9c4b-9c06b8946754/manager/0.log" Nov 21 20:39:55 crc kubenswrapper[4701]: I1121 20:39:55.005478 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-6d4bf84b58-6d2f7_66d77e65-ca72-473d-9697-9168a951b0c9/manager/0.log" Nov 21 20:39:55 crc kubenswrapper[4701]: I1121 20:39:55.028186 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-b4c496f69-9rtf7_1816b847-d41a-400a-bb1d-4f7551cfd581/kube-rbac-proxy/0.log" Nov 21 20:39:55 crc kubenswrapper[4701]: I1121 20:39:55.068736 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-b4c496f69-9rtf7_1816b847-d41a-400a-bb1d-4f7551cfd581/manager/0.log" Nov 21 20:39:55 crc kubenswrapper[4701]: I1121 20:39:55.233692 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-5c984db885-xjww4_b571034c-9574-4a93-80e9-abbf663e6ac3/kube-rbac-proxy/0.log" Nov 21 20:39:55 crc kubenswrapper[4701]: I1121 20:39:55.254342 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-5c984db885-xjww4_b571034c-9574-4a93-80e9-abbf663e6ac3/manager/0.log" Nov 21 20:40:00 crc kubenswrapper[4701]: I1121 20:40:00.952081 4701 scope.go:117] "RemoveContainer" containerID="a8ef73b4a01861450b05d5f0ee923d59212be6c48e5ab1447c10cf0a3199c233" Nov 21 20:40:00 crc kubenswrapper[4701]: E1121 20:40:00.954481 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:40:15 crc kubenswrapper[4701]: I1121 20:40:15.951386 4701 scope.go:117] "RemoveContainer" containerID="a8ef73b4a01861450b05d5f0ee923d59212be6c48e5ab1447c10cf0a3199c233" Nov 21 20:40:15 crc kubenswrapper[4701]: E1121 20:40:15.952366 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:40:16 crc kubenswrapper[4701]: I1121 20:40:16.463485 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-l7w9b_6469e01b-cfc6-4ec9-87de-29c6eeee136f/control-plane-machine-set-operator/0.log" Nov 21 20:40:16 crc kubenswrapper[4701]: I1121 20:40:16.686926 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-z6z69_f741f928-61fd-41d5-b8c8-879a4744fa2e/kube-rbac-proxy/0.log" Nov 21 20:40:16 crc kubenswrapper[4701]: I1121 20:40:16.687605 4701 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-z6z69_f741f928-61fd-41d5-b8c8-879a4744fa2e/machine-api-operator/0.log" Nov 21 20:40:29 crc kubenswrapper[4701]: I1121 20:40:29.962690 4701 scope.go:117] "RemoveContainer" containerID="a8ef73b4a01861450b05d5f0ee923d59212be6c48e5ab1447c10cf0a3199c233" Nov 21 20:40:29 crc kubenswrapper[4701]: E1121 20:40:29.963803 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:40:33 crc kubenswrapper[4701]: I1121 20:40:33.288404 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-868vc_0da0430e-e5cb-465f-8e96-49906f8c0965/cert-manager-controller/0.log" Nov 21 20:40:33 crc kubenswrapper[4701]: I1121 20:40:33.490909 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-2qrjs_325ae061-87b1-4272-a2d5-29a4fcf689f2/cert-manager-cainjector/0.log" Nov 21 20:40:33 crc kubenswrapper[4701]: I1121 20:40:33.575901 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-z4fmm_e8e88792-2751-4fcc-b8b0-dd03328e12b8/cert-manager-webhook/0.log" Nov 21 20:40:40 crc kubenswrapper[4701]: I1121 20:40:40.951637 4701 scope.go:117] "RemoveContainer" containerID="a8ef73b4a01861450b05d5f0ee923d59212be6c48e5ab1447c10cf0a3199c233" Nov 21 20:40:40 crc kubenswrapper[4701]: E1121 20:40:40.953012 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:40:50 crc kubenswrapper[4701]: I1121 20:40:50.282773 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5874bd7bc5-mxxhh_cc6e0ff9-3b8f-403f-9f52-52808c29059d/nmstate-console-plugin/0.log" Nov 21 20:40:50 crc kubenswrapper[4701]: I1121 20:40:50.447217 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-7sdtf_ff85cf8f-c850-4455-92b8-c7bc1c548e68/nmstate-handler/0.log" Nov 21 20:40:50 crc kubenswrapper[4701]: I1121 20:40:50.513808 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-p7n5j_51b03b0f-062b-45d4-95b9-f965e2b69d80/kube-rbac-proxy/0.log" Nov 21 20:40:50 crc kubenswrapper[4701]: I1121 20:40:50.585150 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-p7n5j_51b03b0f-062b-45d4-95b9-f965e2b69d80/nmstate-metrics/0.log" Nov 21 20:40:50 crc kubenswrapper[4701]: I1121 20:40:50.709741 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-557fdffb88-6fdlz_22701628-3f03-4106-ad9e-1b727e2b7c08/nmstate-operator/0.log" Nov 21 20:40:50 crc kubenswrapper[4701]: I1121 20:40:50.851499 4701 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-pthql_45825b15-9674-46b3-b29e-7d78c4de3274/nmstate-webhook/0.log" Nov 21 20:40:51 crc kubenswrapper[4701]: I1121 20:40:51.951304 4701 scope.go:117] "RemoveContainer" containerID="a8ef73b4a01861450b05d5f0ee923d59212be6c48e5ab1447c10cf0a3199c233" Nov 21 20:40:51 crc kubenswrapper[4701]: E1121 20:40:51.951737 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:41:05 crc kubenswrapper[4701]: I1121 20:41:05.952696 4701 scope.go:117] "RemoveContainer" containerID="a8ef73b4a01861450b05d5f0ee923d59212be6c48e5ab1447c10cf0a3199c233" Nov 21 20:41:05 crc kubenswrapper[4701]: E1121 20:41:05.954336 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:41:10 crc kubenswrapper[4701]: I1121 20:41:10.257092 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-htbbv_f0e3f5ca-8ea5-40a2-b362-49c12a9c2f0c/kube-rbac-proxy/0.log" Nov 21 20:41:10 crc kubenswrapper[4701]: I1121 20:41:10.470627 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9ssmb_5a5f9d84-176f-4592-b663-03adffd0073f/cp-frr-files/0.log" Nov 21 20:41:10 crc kubenswrapper[4701]: I1121 20:41:10.477334 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-htbbv_f0e3f5ca-8ea5-40a2-b362-49c12a9c2f0c/controller/0.log" Nov 21 20:41:11 crc kubenswrapper[4701]: I1121 20:41:11.539352 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9ssmb_5a5f9d84-176f-4592-b663-03adffd0073f/cp-frr-files/0.log" Nov 21 20:41:11 crc kubenswrapper[4701]: I1121 20:41:11.552560 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9ssmb_5a5f9d84-176f-4592-b663-03adffd0073f/cp-reloader/0.log" Nov 21 20:41:11 crc kubenswrapper[4701]: I1121 20:41:11.595516 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9ssmb_5a5f9d84-176f-4592-b663-03adffd0073f/cp-reloader/0.log" Nov 21 20:41:11 crc kubenswrapper[4701]: I1121 20:41:11.609595 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9ssmb_5a5f9d84-176f-4592-b663-03adffd0073f/cp-metrics/0.log" Nov 21 20:41:11 crc kubenswrapper[4701]: I1121 20:41:11.922004 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9ssmb_5a5f9d84-176f-4592-b663-03adffd0073f/cp-frr-files/0.log" Nov 21 20:41:11 crc kubenswrapper[4701]: I1121 20:41:11.931608 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9ssmb_5a5f9d84-176f-4592-b663-03adffd0073f/cp-reloader/0.log" Nov 21 20:41:11 crc kubenswrapper[4701]: I1121 20:41:11.949847 4701 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-9ssmb_5a5f9d84-176f-4592-b663-03adffd0073f/cp-metrics/0.log" Nov 21 20:41:11 crc kubenswrapper[4701]: I1121 20:41:11.991330 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9ssmb_5a5f9d84-176f-4592-b663-03adffd0073f/cp-metrics/0.log" Nov 21 20:41:12 crc kubenswrapper[4701]: I1121 20:41:12.161153 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9ssmb_5a5f9d84-176f-4592-b663-03adffd0073f/cp-frr-files/0.log" Nov 21 20:41:12 crc kubenswrapper[4701]: I1121 20:41:12.182102 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9ssmb_5a5f9d84-176f-4592-b663-03adffd0073f/cp-metrics/0.log" Nov 21 20:41:12 crc kubenswrapper[4701]: I1121 20:41:12.239885 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9ssmb_5a5f9d84-176f-4592-b663-03adffd0073f/cp-reloader/0.log" Nov 21 20:41:12 crc kubenswrapper[4701]: I1121 20:41:12.315928 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9ssmb_5a5f9d84-176f-4592-b663-03adffd0073f/controller/0.log" Nov 21 20:41:12 crc kubenswrapper[4701]: I1121 20:41:12.474384 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9ssmb_5a5f9d84-176f-4592-b663-03adffd0073f/kube-rbac-proxy/0.log" Nov 21 20:41:12 crc kubenswrapper[4701]: I1121 20:41:12.521023 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9ssmb_5a5f9d84-176f-4592-b663-03adffd0073f/kube-rbac-proxy-frr/0.log" Nov 21 20:41:12 crc kubenswrapper[4701]: I1121 20:41:12.553317 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9ssmb_5a5f9d84-176f-4592-b663-03adffd0073f/frr-metrics/0.log" Nov 21 20:41:12 crc kubenswrapper[4701]: I1121 20:41:12.726969 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9ssmb_5a5f9d84-176f-4592-b663-03adffd0073f/reloader/0.log" Nov 21 20:41:12 crc kubenswrapper[4701]: I1121 20:41:12.760031 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-4zbkd_dc0b171f-3d9f-41b9-914b-ca723de8416f/frr-k8s-webhook-server/0.log" Nov 21 20:41:13 crc kubenswrapper[4701]: I1121 20:41:13.067575 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-5b89f66749-2c7l9_1e13b4b2-e1e0-4f12-b8a4-e364e57407b1/manager/0.log" Nov 21 20:41:13 crc kubenswrapper[4701]: I1121 20:41:13.099127 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-7fffd4c557-7pqsr_394bdfbe-ca77-47ca-837c-68023e532b01/webhook-server/0.log" Nov 21 20:41:13 crc kubenswrapper[4701]: I1121 20:41:13.260492 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-hmzkz_fa17db1f-33dc-4f0e-a191-7a01d67c575d/kube-rbac-proxy/0.log" Nov 21 20:41:13 crc kubenswrapper[4701]: I1121 20:41:13.889445 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-hmzkz_fa17db1f-33dc-4f0e-a191-7a01d67c575d/speaker/0.log" Nov 21 20:41:14 crc kubenswrapper[4701]: I1121 20:41:14.179175 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9ssmb_5a5f9d84-176f-4592-b663-03adffd0073f/frr/0.log" Nov 21 20:41:17 crc kubenswrapper[4701]: I1121 20:41:17.950963 4701 scope.go:117] "RemoveContainer" 
containerID="a8ef73b4a01861450b05d5f0ee923d59212be6c48e5ab1447c10cf0a3199c233" Nov 21 20:41:17 crc kubenswrapper[4701]: E1121 20:41:17.951686 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:41:29 crc kubenswrapper[4701]: I1121 20:41:29.633555 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w_dbcf8276-4c26-4faa-85dc-abc66d2004a6/util/0.log" Nov 21 20:41:29 crc kubenswrapper[4701]: I1121 20:41:29.828425 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w_dbcf8276-4c26-4faa-85dc-abc66d2004a6/util/0.log" Nov 21 20:41:29 crc kubenswrapper[4701]: I1121 20:41:29.833776 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w_dbcf8276-4c26-4faa-85dc-abc66d2004a6/pull/0.log" Nov 21 20:41:29 crc kubenswrapper[4701]: I1121 20:41:29.839938 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w_dbcf8276-4c26-4faa-85dc-abc66d2004a6/pull/0.log" Nov 21 20:41:30 crc kubenswrapper[4701]: I1121 20:41:30.045392 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w_dbcf8276-4c26-4faa-85dc-abc66d2004a6/pull/0.log" Nov 21 20:41:30 crc kubenswrapper[4701]: I1121 20:41:30.058852 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w_dbcf8276-4c26-4faa-85dc-abc66d2004a6/extract/0.log" Nov 21 20:41:30 crc kubenswrapper[4701]: I1121 20:41:30.068180 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w_dbcf8276-4c26-4faa-85dc-abc66d2004a6/util/0.log" Nov 21 20:41:30 crc kubenswrapper[4701]: I1121 20:41:30.208556 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl_57477ab7-1bf6-486a-ae7a-98cf3e893869/util/0.log" Nov 21 20:41:30 crc kubenswrapper[4701]: I1121 20:41:30.420305 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl_57477ab7-1bf6-486a-ae7a-98cf3e893869/util/0.log" Nov 21 20:41:30 crc kubenswrapper[4701]: I1121 20:41:30.469407 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl_57477ab7-1bf6-486a-ae7a-98cf3e893869/pull/0.log" Nov 21 20:41:30 crc kubenswrapper[4701]: I1121 20:41:30.476883 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl_57477ab7-1bf6-486a-ae7a-98cf3e893869/pull/0.log" Nov 21 20:41:30 crc kubenswrapper[4701]: I1121 20:41:30.681461 4701 log.go:25] "Finished parsing 
log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl_57477ab7-1bf6-486a-ae7a-98cf3e893869/extract/0.log" Nov 21 20:41:30 crc kubenswrapper[4701]: I1121 20:41:30.701089 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl_57477ab7-1bf6-486a-ae7a-98cf3e893869/util/0.log" Nov 21 20:41:30 crc kubenswrapper[4701]: I1121 20:41:30.725323 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl_57477ab7-1bf6-486a-ae7a-98cf3e893869/pull/0.log" Nov 21 20:41:30 crc kubenswrapper[4701]: I1121 20:41:30.898234 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-v9dtr_08bf0aaf-b621-48f2-b2b1-c6939a9a3440/extract-utilities/0.log" Nov 21 20:41:31 crc kubenswrapper[4701]: I1121 20:41:31.087553 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-v9dtr_08bf0aaf-b621-48f2-b2b1-c6939a9a3440/extract-content/0.log" Nov 21 20:41:31 crc kubenswrapper[4701]: I1121 20:41:31.091461 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-v9dtr_08bf0aaf-b621-48f2-b2b1-c6939a9a3440/extract-utilities/0.log" Nov 21 20:41:31 crc kubenswrapper[4701]: I1121 20:41:31.147060 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-v9dtr_08bf0aaf-b621-48f2-b2b1-c6939a9a3440/extract-content/0.log" Nov 21 20:41:31 crc kubenswrapper[4701]: I1121 20:41:31.316570 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-v9dtr_08bf0aaf-b621-48f2-b2b1-c6939a9a3440/extract-utilities/0.log" Nov 21 20:41:31 crc kubenswrapper[4701]: I1121 20:41:31.375955 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-v9dtr_08bf0aaf-b621-48f2-b2b1-c6939a9a3440/extract-content/0.log" Nov 21 20:41:31 crc kubenswrapper[4701]: I1121 20:41:31.549813 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2r5kn_83bec736-5bd4-4889-b0fe-864eaa0fcb3a/extract-utilities/0.log" Nov 21 20:41:31 crc kubenswrapper[4701]: I1121 20:41:31.745942 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2r5kn_83bec736-5bd4-4889-b0fe-864eaa0fcb3a/extract-content/0.log" Nov 21 20:41:31 crc kubenswrapper[4701]: I1121 20:41:31.808695 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2r5kn_83bec736-5bd4-4889-b0fe-864eaa0fcb3a/extract-utilities/0.log" Nov 21 20:41:31 crc kubenswrapper[4701]: I1121 20:41:31.829700 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2r5kn_83bec736-5bd4-4889-b0fe-864eaa0fcb3a/extract-content/0.log" Nov 21 20:41:31 crc kubenswrapper[4701]: I1121 20:41:31.952690 4701 scope.go:117] "RemoveContainer" containerID="a8ef73b4a01861450b05d5f0ee923d59212be6c48e5ab1447c10cf0a3199c233" Nov 21 20:41:31 crc kubenswrapper[4701]: E1121 20:41:31.953142 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:41:32 crc kubenswrapper[4701]: I1121 20:41:32.071628 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-v9dtr_08bf0aaf-b621-48f2-b2b1-c6939a9a3440/registry-server/0.log" Nov 21 20:41:32 crc kubenswrapper[4701]: I1121 20:41:32.103412 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2r5kn_83bec736-5bd4-4889-b0fe-864eaa0fcb3a/extract-content/0.log" Nov 21 20:41:32 crc kubenswrapper[4701]: I1121 20:41:32.168086 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2r5kn_83bec736-5bd4-4889-b0fe-864eaa0fcb3a/extract-utilities/0.log" Nov 21 20:41:32 crc kubenswrapper[4701]: I1121 20:41:32.356556 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x_504f2cb2-d553-4a6e-8a22-b3c111a55808/util/0.log" Nov 21 20:41:32 crc kubenswrapper[4701]: I1121 20:41:32.543867 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x_504f2cb2-d553-4a6e-8a22-b3c111a55808/util/0.log" Nov 21 20:41:32 crc kubenswrapper[4701]: I1121 20:41:32.616881 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x_504f2cb2-d553-4a6e-8a22-b3c111a55808/pull/0.log" Nov 21 20:41:32 crc kubenswrapper[4701]: I1121 20:41:32.633535 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x_504f2cb2-d553-4a6e-8a22-b3c111a55808/pull/0.log" Nov 21 20:41:32 crc kubenswrapper[4701]: I1121 20:41:32.878634 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x_504f2cb2-d553-4a6e-8a22-b3c111a55808/extract/0.log" Nov 21 20:41:32 crc kubenswrapper[4701]: I1121 20:41:32.919721 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x_504f2cb2-d553-4a6e-8a22-b3c111a55808/pull/0.log" Nov 21 20:41:32 crc kubenswrapper[4701]: I1121 20:41:32.952920 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x_504f2cb2-d553-4a6e-8a22-b3c111a55808/util/0.log" Nov 21 20:41:32 crc kubenswrapper[4701]: I1121 20:41:32.986881 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2r5kn_83bec736-5bd4-4889-b0fe-864eaa0fcb3a/registry-server/0.log" Nov 21 20:41:33 crc kubenswrapper[4701]: I1121 20:41:33.180642 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-7grnb_be676bd8-0b5e-48b4-829b-021f132d3247/marketplace-operator/0.log" Nov 21 20:41:33 crc kubenswrapper[4701]: I1121 20:41:33.238933 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-ctfn6_72f39205-4f40-45be-99f5-0036f0da7491/extract-utilities/0.log" Nov 21 20:41:33 crc kubenswrapper[4701]: I1121 20:41:33.411789 4701 
log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-ctfn6_72f39205-4f40-45be-99f5-0036f0da7491/extract-utilities/0.log" Nov 21 20:41:33 crc kubenswrapper[4701]: I1121 20:41:33.445014 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-ctfn6_72f39205-4f40-45be-99f5-0036f0da7491/extract-content/0.log" Nov 21 20:41:33 crc kubenswrapper[4701]: I1121 20:41:33.466250 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-ctfn6_72f39205-4f40-45be-99f5-0036f0da7491/extract-content/0.log" Nov 21 20:41:33 crc kubenswrapper[4701]: I1121 20:41:33.658471 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tbtgz_3e81175f-5aec-4176-b6ec-d4d292063f20/extract-utilities/0.log" Nov 21 20:41:33 crc kubenswrapper[4701]: I1121 20:41:33.662988 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-ctfn6_72f39205-4f40-45be-99f5-0036f0da7491/extract-content/0.log" Nov 21 20:41:33 crc kubenswrapper[4701]: I1121 20:41:33.667829 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-ctfn6_72f39205-4f40-45be-99f5-0036f0da7491/extract-utilities/0.log" Nov 21 20:41:33 crc kubenswrapper[4701]: I1121 20:41:33.931460 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-ctfn6_72f39205-4f40-45be-99f5-0036f0da7491/registry-server/0.log" Nov 21 20:41:33 crc kubenswrapper[4701]: I1121 20:41:33.954085 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tbtgz_3e81175f-5aec-4176-b6ec-d4d292063f20/extract-utilities/0.log" Nov 21 20:41:33 crc kubenswrapper[4701]: I1121 20:41:33.965378 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tbtgz_3e81175f-5aec-4176-b6ec-d4d292063f20/extract-content/0.log" Nov 21 20:41:33 crc kubenswrapper[4701]: I1121 20:41:33.990681 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tbtgz_3e81175f-5aec-4176-b6ec-d4d292063f20/extract-content/0.log" Nov 21 20:41:34 crc kubenswrapper[4701]: I1121 20:41:34.175390 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tbtgz_3e81175f-5aec-4176-b6ec-d4d292063f20/extract-content/0.log" Nov 21 20:41:34 crc kubenswrapper[4701]: I1121 20:41:34.193921 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tbtgz_3e81175f-5aec-4176-b6ec-d4d292063f20/extract-utilities/0.log" Nov 21 20:41:34 crc kubenswrapper[4701]: I1121 20:41:34.853323 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tbtgz_3e81175f-5aec-4176-b6ec-d4d292063f20/registry-server/0.log" Nov 21 20:41:42 crc kubenswrapper[4701]: I1121 20:41:42.952285 4701 scope.go:117] "RemoveContainer" containerID="a8ef73b4a01861450b05d5f0ee923d59212be6c48e5ab1447c10cf0a3199c233" Nov 21 20:41:42 crc kubenswrapper[4701]: E1121 20:41:42.953626 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:41:51 crc kubenswrapper[4701]: I1121 20:41:51.484414 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-668cf9dfbb-2rtjt_de3cafdb-cdad-4b38-a867-fd0e88551dc7/prometheus-operator/0.log" Nov 21 20:41:51 crc kubenswrapper[4701]: I1121 20:41:51.609744 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-7f5ddff8fb-fjw5g_91073027-6c2c-4cbf-af6d-bd763b073a0b/prometheus-operator-admission-webhook/0.log" Nov 21 20:41:51 crc kubenswrapper[4701]: I1121 20:41:51.657052 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-7f5ddff8fb-rv4tn_11625333-ca33-46bb-9856-a0390b6283bf/prometheus-operator-admission-webhook/0.log" Nov 21 20:41:52 crc kubenswrapper[4701]: I1121 20:41:52.704689 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-d8bb48f5d-b65kf_c4bfc6b7-63e8-4ab2-a9f6-369332e97f12/operator/0.log" Nov 21 20:41:52 crc kubenswrapper[4701]: I1121 20:41:52.719402 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5446b9c989-bczvw_4adf7511-ec5a-47a4-9e69-c8650f1bc017/perses-operator/0.log" Nov 21 20:41:54 crc kubenswrapper[4701]: I1121 20:41:54.952672 4701 scope.go:117] "RemoveContainer" containerID="a8ef73b4a01861450b05d5f0ee923d59212be6c48e5ab1447c10cf0a3199c233" Nov 21 20:41:54 crc kubenswrapper[4701]: E1121 20:41:54.953772 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:42:07 crc kubenswrapper[4701]: I1121 20:42:07.951977 4701 scope.go:117] "RemoveContainer" containerID="a8ef73b4a01861450b05d5f0ee923d59212be6c48e5ab1447c10cf0a3199c233" Nov 21 20:42:07 crc kubenswrapper[4701]: E1121 20:42:07.952764 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:42:22 crc kubenswrapper[4701]: I1121 20:42:22.951516 4701 scope.go:117] "RemoveContainer" containerID="a8ef73b4a01861450b05d5f0ee923d59212be6c48e5ab1447c10cf0a3199c233" Nov 21 20:42:22 crc kubenswrapper[4701]: E1121 20:42:22.953408 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:42:34 crc kubenswrapper[4701]: I1121 20:42:34.951177 4701 scope.go:117] "RemoveContainer" 
containerID="a8ef73b4a01861450b05d5f0ee923d59212be6c48e5ab1447c10cf0a3199c233" Nov 21 20:42:34 crc kubenswrapper[4701]: E1121 20:42:34.952292 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:42:43 crc kubenswrapper[4701]: I1121 20:42:43.658480 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-swkwz"] Nov 21 20:42:43 crc kubenswrapper[4701]: E1121 20:42:43.659660 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c7e42b4-c6d1-431c-9b7c-0533176d50db" containerName="container-00" Nov 21 20:42:43 crc kubenswrapper[4701]: I1121 20:42:43.659672 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c7e42b4-c6d1-431c-9b7c-0533176d50db" containerName="container-00" Nov 21 20:42:43 crc kubenswrapper[4701]: I1121 20:42:43.659936 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c7e42b4-c6d1-431c-9b7c-0533176d50db" containerName="container-00" Nov 21 20:42:43 crc kubenswrapper[4701]: I1121 20:42:43.661528 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-swkwz" Nov 21 20:42:43 crc kubenswrapper[4701]: I1121 20:42:43.700254 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-swkwz"] Nov 21 20:42:43 crc kubenswrapper[4701]: I1121 20:42:43.741669 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7zhxh\" (UniqueName: \"kubernetes.io/projected/57bd8418-d205-4455-ab33-d306e2833dc5-kube-api-access-7zhxh\") pod \"redhat-operators-swkwz\" (UID: \"57bd8418-d205-4455-ab33-d306e2833dc5\") " pod="openshift-marketplace/redhat-operators-swkwz" Nov 21 20:42:43 crc kubenswrapper[4701]: I1121 20:42:43.742515 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57bd8418-d205-4455-ab33-d306e2833dc5-catalog-content\") pod \"redhat-operators-swkwz\" (UID: \"57bd8418-d205-4455-ab33-d306e2833dc5\") " pod="openshift-marketplace/redhat-operators-swkwz" Nov 21 20:42:43 crc kubenswrapper[4701]: I1121 20:42:43.742707 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57bd8418-d205-4455-ab33-d306e2833dc5-utilities\") pod \"redhat-operators-swkwz\" (UID: \"57bd8418-d205-4455-ab33-d306e2833dc5\") " pod="openshift-marketplace/redhat-operators-swkwz" Nov 21 20:42:43 crc kubenswrapper[4701]: I1121 20:42:43.844559 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7zhxh\" (UniqueName: \"kubernetes.io/projected/57bd8418-d205-4455-ab33-d306e2833dc5-kube-api-access-7zhxh\") pod \"redhat-operators-swkwz\" (UID: \"57bd8418-d205-4455-ab33-d306e2833dc5\") " pod="openshift-marketplace/redhat-operators-swkwz" Nov 21 20:42:43 crc kubenswrapper[4701]: I1121 20:42:43.844685 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/57bd8418-d205-4455-ab33-d306e2833dc5-catalog-content\") pod \"redhat-operators-swkwz\" (UID: \"57bd8418-d205-4455-ab33-d306e2833dc5\") " pod="openshift-marketplace/redhat-operators-swkwz" Nov 21 20:42:43 crc kubenswrapper[4701]: I1121 20:42:43.844793 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57bd8418-d205-4455-ab33-d306e2833dc5-utilities\") pod \"redhat-operators-swkwz\" (UID: \"57bd8418-d205-4455-ab33-d306e2833dc5\") " pod="openshift-marketplace/redhat-operators-swkwz" Nov 21 20:42:43 crc kubenswrapper[4701]: I1121 20:42:43.845214 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57bd8418-d205-4455-ab33-d306e2833dc5-catalog-content\") pod \"redhat-operators-swkwz\" (UID: \"57bd8418-d205-4455-ab33-d306e2833dc5\") " pod="openshift-marketplace/redhat-operators-swkwz" Nov 21 20:42:43 crc kubenswrapper[4701]: I1121 20:42:43.845428 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57bd8418-d205-4455-ab33-d306e2833dc5-utilities\") pod \"redhat-operators-swkwz\" (UID: \"57bd8418-d205-4455-ab33-d306e2833dc5\") " pod="openshift-marketplace/redhat-operators-swkwz" Nov 21 20:42:43 crc kubenswrapper[4701]: I1121 20:42:43.870099 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7zhxh\" (UniqueName: \"kubernetes.io/projected/57bd8418-d205-4455-ab33-d306e2833dc5-kube-api-access-7zhxh\") pod \"redhat-operators-swkwz\" (UID: \"57bd8418-d205-4455-ab33-d306e2833dc5\") " pod="openshift-marketplace/redhat-operators-swkwz" Nov 21 20:42:44 crc kubenswrapper[4701]: I1121 20:42:44.012852 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-swkwz" Nov 21 20:42:44 crc kubenswrapper[4701]: I1121 20:42:44.506657 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-swkwz"] Nov 21 20:42:44 crc kubenswrapper[4701]: I1121 20:42:44.906583 4701 generic.go:334] "Generic (PLEG): container finished" podID="57bd8418-d205-4455-ab33-d306e2833dc5" containerID="1f2883d6c5d3a583ac4bb6d1e55cd952679720337a0bdf8f9b10fa9d2f69ad0b" exitCode=0 Nov 21 20:42:44 crc kubenswrapper[4701]: I1121 20:42:44.906775 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-swkwz" event={"ID":"57bd8418-d205-4455-ab33-d306e2833dc5","Type":"ContainerDied","Data":"1f2883d6c5d3a583ac4bb6d1e55cd952679720337a0bdf8f9b10fa9d2f69ad0b"} Nov 21 20:42:44 crc kubenswrapper[4701]: I1121 20:42:44.906962 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-swkwz" event={"ID":"57bd8418-d205-4455-ab33-d306e2833dc5","Type":"ContainerStarted","Data":"e76fe1062861d78f97f968bc67d3d59ff0180bbaf147bd8d298abf11dfb6ffd6"} Nov 21 20:42:44 crc kubenswrapper[4701]: I1121 20:42:44.909503 4701 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 20:42:46 crc kubenswrapper[4701]: I1121 20:42:46.943196 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-swkwz" event={"ID":"57bd8418-d205-4455-ab33-d306e2833dc5","Type":"ContainerStarted","Data":"dcb7cc2aa7b12534c6f6493e39adbb011a064c99bbd8816eeadb9b4fec8ee383"} Nov 21 20:42:48 crc kubenswrapper[4701]: I1121 20:42:48.249584 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-57ttz"] Nov 21 20:42:48 crc kubenswrapper[4701]: I1121 20:42:48.253861 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-57ttz" Nov 21 20:42:48 crc kubenswrapper[4701]: I1121 20:42:48.265113 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-57ttz"] Nov 21 20:42:48 crc kubenswrapper[4701]: I1121 20:42:48.297024 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jj7g5\" (UniqueName: \"kubernetes.io/projected/515b78f2-c72c-409d-8101-997b42623e59-kube-api-access-jj7g5\") pod \"redhat-marketplace-57ttz\" (UID: \"515b78f2-c72c-409d-8101-997b42623e59\") " pod="openshift-marketplace/redhat-marketplace-57ttz" Nov 21 20:42:48 crc kubenswrapper[4701]: I1121 20:42:48.297178 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/515b78f2-c72c-409d-8101-997b42623e59-utilities\") pod \"redhat-marketplace-57ttz\" (UID: \"515b78f2-c72c-409d-8101-997b42623e59\") " pod="openshift-marketplace/redhat-marketplace-57ttz" Nov 21 20:42:48 crc kubenswrapper[4701]: I1121 20:42:48.297385 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/515b78f2-c72c-409d-8101-997b42623e59-catalog-content\") pod \"redhat-marketplace-57ttz\" (UID: \"515b78f2-c72c-409d-8101-997b42623e59\") " pod="openshift-marketplace/redhat-marketplace-57ttz" Nov 21 20:42:48 crc kubenswrapper[4701]: I1121 20:42:48.399347 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/515b78f2-c72c-409d-8101-997b42623e59-utilities\") pod \"redhat-marketplace-57ttz\" (UID: \"515b78f2-c72c-409d-8101-997b42623e59\") " pod="openshift-marketplace/redhat-marketplace-57ttz" Nov 21 20:42:48 crc kubenswrapper[4701]: I1121 20:42:48.399490 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/515b78f2-c72c-409d-8101-997b42623e59-catalog-content\") pod \"redhat-marketplace-57ttz\" (UID: \"515b78f2-c72c-409d-8101-997b42623e59\") " pod="openshift-marketplace/redhat-marketplace-57ttz" Nov 21 20:42:48 crc kubenswrapper[4701]: I1121 20:42:48.399570 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jj7g5\" (UniqueName: \"kubernetes.io/projected/515b78f2-c72c-409d-8101-997b42623e59-kube-api-access-jj7g5\") pod \"redhat-marketplace-57ttz\" (UID: \"515b78f2-c72c-409d-8101-997b42623e59\") " pod="openshift-marketplace/redhat-marketplace-57ttz" Nov 21 20:42:48 crc kubenswrapper[4701]: I1121 20:42:48.399956 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/515b78f2-c72c-409d-8101-997b42623e59-utilities\") pod \"redhat-marketplace-57ttz\" (UID: \"515b78f2-c72c-409d-8101-997b42623e59\") " pod="openshift-marketplace/redhat-marketplace-57ttz" Nov 21 20:42:48 crc kubenswrapper[4701]: I1121 20:42:48.400077 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/515b78f2-c72c-409d-8101-997b42623e59-catalog-content\") pod \"redhat-marketplace-57ttz\" (UID: \"515b78f2-c72c-409d-8101-997b42623e59\") " pod="openshift-marketplace/redhat-marketplace-57ttz" Nov 21 20:42:48 crc kubenswrapper[4701]: I1121 20:42:48.427979 4701 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-jj7g5\" (UniqueName: \"kubernetes.io/projected/515b78f2-c72c-409d-8101-997b42623e59-kube-api-access-jj7g5\") pod \"redhat-marketplace-57ttz\" (UID: \"515b78f2-c72c-409d-8101-997b42623e59\") " pod="openshift-marketplace/redhat-marketplace-57ttz" Nov 21 20:42:48 crc kubenswrapper[4701]: I1121 20:42:48.618303 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-57ttz" Nov 21 20:42:48 crc kubenswrapper[4701]: I1121 20:42:48.849136 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-wr9nd"] Nov 21 20:42:48 crc kubenswrapper[4701]: I1121 20:42:48.851746 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wr9nd" Nov 21 20:42:48 crc kubenswrapper[4701]: I1121 20:42:48.858613 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wr9nd"] Nov 21 20:42:48 crc kubenswrapper[4701]: I1121 20:42:48.953802 4701 scope.go:117] "RemoveContainer" containerID="a8ef73b4a01861450b05d5f0ee923d59212be6c48e5ab1447c10cf0a3199c233" Nov 21 20:42:48 crc kubenswrapper[4701]: E1121 20:42:48.954285 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:42:49 crc kubenswrapper[4701]: I1121 20:42:49.013761 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vrmvz\" (UniqueName: \"kubernetes.io/projected/6410a611-8058-4bcd-9058-80e84ec073d5-kube-api-access-vrmvz\") pod \"certified-operators-wr9nd\" (UID: \"6410a611-8058-4bcd-9058-80e84ec073d5\") " pod="openshift-marketplace/certified-operators-wr9nd" Nov 21 20:42:49 crc kubenswrapper[4701]: I1121 20:42:49.013861 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6410a611-8058-4bcd-9058-80e84ec073d5-utilities\") pod \"certified-operators-wr9nd\" (UID: \"6410a611-8058-4bcd-9058-80e84ec073d5\") " pod="openshift-marketplace/certified-operators-wr9nd" Nov 21 20:42:49 crc kubenswrapper[4701]: I1121 20:42:49.013995 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6410a611-8058-4bcd-9058-80e84ec073d5-catalog-content\") pod \"certified-operators-wr9nd\" (UID: \"6410a611-8058-4bcd-9058-80e84ec073d5\") " pod="openshift-marketplace/certified-operators-wr9nd" Nov 21 20:42:49 crc kubenswrapper[4701]: I1121 20:42:49.116091 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vrmvz\" (UniqueName: \"kubernetes.io/projected/6410a611-8058-4bcd-9058-80e84ec073d5-kube-api-access-vrmvz\") pod \"certified-operators-wr9nd\" (UID: \"6410a611-8058-4bcd-9058-80e84ec073d5\") " pod="openshift-marketplace/certified-operators-wr9nd" Nov 21 20:42:49 crc kubenswrapper[4701]: I1121 20:42:49.116168 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/6410a611-8058-4bcd-9058-80e84ec073d5-utilities\") pod \"certified-operators-wr9nd\" (UID: \"6410a611-8058-4bcd-9058-80e84ec073d5\") " pod="openshift-marketplace/certified-operators-wr9nd" Nov 21 20:42:49 crc kubenswrapper[4701]: I1121 20:42:49.116269 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6410a611-8058-4bcd-9058-80e84ec073d5-catalog-content\") pod \"certified-operators-wr9nd\" (UID: \"6410a611-8058-4bcd-9058-80e84ec073d5\") " pod="openshift-marketplace/certified-operators-wr9nd" Nov 21 20:42:49 crc kubenswrapper[4701]: I1121 20:42:49.116957 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6410a611-8058-4bcd-9058-80e84ec073d5-catalog-content\") pod \"certified-operators-wr9nd\" (UID: \"6410a611-8058-4bcd-9058-80e84ec073d5\") " pod="openshift-marketplace/certified-operators-wr9nd" Nov 21 20:42:49 crc kubenswrapper[4701]: I1121 20:42:49.117284 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6410a611-8058-4bcd-9058-80e84ec073d5-utilities\") pod \"certified-operators-wr9nd\" (UID: \"6410a611-8058-4bcd-9058-80e84ec073d5\") " pod="openshift-marketplace/certified-operators-wr9nd" Nov 21 20:42:49 crc kubenswrapper[4701]: I1121 20:42:49.140745 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vrmvz\" (UniqueName: \"kubernetes.io/projected/6410a611-8058-4bcd-9058-80e84ec073d5-kube-api-access-vrmvz\") pod \"certified-operators-wr9nd\" (UID: \"6410a611-8058-4bcd-9058-80e84ec073d5\") " pod="openshift-marketplace/certified-operators-wr9nd" Nov 21 20:42:49 crc kubenswrapper[4701]: I1121 20:42:49.193021 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-57ttz"] Nov 21 20:42:49 crc kubenswrapper[4701]: I1121 20:42:49.196370 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wr9nd" Nov 21 20:42:49 crc kubenswrapper[4701]: W1121 20:42:49.198334 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod515b78f2_c72c_409d_8101_997b42623e59.slice/crio-57eab692f190097992d8de8a186ddcb95f6ab9caa75ae3e8d30a4e71fa264192 WatchSource:0}: Error finding container 57eab692f190097992d8de8a186ddcb95f6ab9caa75ae3e8d30a4e71fa264192: Status 404 returned error can't find the container with id 57eab692f190097992d8de8a186ddcb95f6ab9caa75ae3e8d30a4e71fa264192 Nov 21 20:42:49 crc kubenswrapper[4701]: I1121 20:42:49.582476 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wr9nd"] Nov 21 20:42:49 crc kubenswrapper[4701]: I1121 20:42:49.996382 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-57ttz" event={"ID":"515b78f2-c72c-409d-8101-997b42623e59","Type":"ContainerStarted","Data":"57eab692f190097992d8de8a186ddcb95f6ab9caa75ae3e8d30a4e71fa264192"} Nov 21 20:42:50 crc kubenswrapper[4701]: W1121 20:42:50.107666 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6410a611_8058_4bcd_9058_80e84ec073d5.slice/crio-cc819dd9636b6a199e103f3081452c3607873af64bae3dee180fe747ed9dd6cc WatchSource:0}: Error finding container cc819dd9636b6a199e103f3081452c3607873af64bae3dee180fe747ed9dd6cc: Status 404 returned error can't find the container with id cc819dd9636b6a199e103f3081452c3607873af64bae3dee180fe747ed9dd6cc Nov 21 20:42:51 crc kubenswrapper[4701]: I1121 20:42:51.019227 4701 generic.go:334] "Generic (PLEG): container finished" podID="515b78f2-c72c-409d-8101-997b42623e59" containerID="fc66608b4fd1d22c2977d97a58c0516a00c28a8a6170f3072f9846337c03cb14" exitCode=0 Nov 21 20:42:51 crc kubenswrapper[4701]: I1121 20:42:51.019330 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-57ttz" event={"ID":"515b78f2-c72c-409d-8101-997b42623e59","Type":"ContainerDied","Data":"fc66608b4fd1d22c2977d97a58c0516a00c28a8a6170f3072f9846337c03cb14"} Nov 21 20:42:51 crc kubenswrapper[4701]: I1121 20:42:51.028472 4701 generic.go:334] "Generic (PLEG): container finished" podID="6410a611-8058-4bcd-9058-80e84ec073d5" containerID="74c4dc1d7fe17c8bc6f4e87fa004965eadd245eed26222be3783b661d5529cb4" exitCode=0 Nov 21 20:42:51 crc kubenswrapper[4701]: I1121 20:42:51.028564 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wr9nd" event={"ID":"6410a611-8058-4bcd-9058-80e84ec073d5","Type":"ContainerDied","Data":"74c4dc1d7fe17c8bc6f4e87fa004965eadd245eed26222be3783b661d5529cb4"} Nov 21 20:42:51 crc kubenswrapper[4701]: I1121 20:42:51.028659 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wr9nd" event={"ID":"6410a611-8058-4bcd-9058-80e84ec073d5","Type":"ContainerStarted","Data":"cc819dd9636b6a199e103f3081452c3607873af64bae3dee180fe747ed9dd6cc"} Nov 21 20:42:51 crc kubenswrapper[4701]: I1121 20:42:51.049251 4701 generic.go:334] "Generic (PLEG): container finished" podID="57bd8418-d205-4455-ab33-d306e2833dc5" containerID="dcb7cc2aa7b12534c6f6493e39adbb011a064c99bbd8816eeadb9b4fec8ee383" exitCode=0 Nov 21 20:42:51 crc kubenswrapper[4701]: I1121 20:42:51.049316 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-swkwz" 
event={"ID":"57bd8418-d205-4455-ab33-d306e2833dc5","Type":"ContainerDied","Data":"dcb7cc2aa7b12534c6f6493e39adbb011a064c99bbd8816eeadb9b4fec8ee383"} Nov 21 20:42:52 crc kubenswrapper[4701]: I1121 20:42:52.068230 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-57ttz" event={"ID":"515b78f2-c72c-409d-8101-997b42623e59","Type":"ContainerStarted","Data":"02baa1b875c3dc44bc4643bf1982c901c8651d8350ab181aec28f87ade6fa391"} Nov 21 20:42:52 crc kubenswrapper[4701]: I1121 20:42:52.072986 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wr9nd" event={"ID":"6410a611-8058-4bcd-9058-80e84ec073d5","Type":"ContainerStarted","Data":"13aff5644c8c7f0d7d82e750591221c996817a42668824b1dfa5544f9a98b6b7"} Nov 21 20:42:52 crc kubenswrapper[4701]: I1121 20:42:52.077671 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-swkwz" event={"ID":"57bd8418-d205-4455-ab33-d306e2833dc5","Type":"ContainerStarted","Data":"6b6708f573b7ebd8f9e9d2ada43207be1c5880f744dc9142c8985f1598f3ab7a"} Nov 21 20:42:52 crc kubenswrapper[4701]: I1121 20:42:52.137157 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-swkwz" podStartSLOduration=2.5732468109999997 podStartE2EDuration="9.137131884s" podCreationTimestamp="2025-11-21 20:42:43 +0000 UTC" firstStartedPulling="2025-11-21 20:42:44.90929801 +0000 UTC m=+6055.694438037" lastFinishedPulling="2025-11-21 20:42:51.473183063 +0000 UTC m=+6062.258323110" observedRunningTime="2025-11-21 20:42:52.122858373 +0000 UTC m=+6062.907998400" watchObservedRunningTime="2025-11-21 20:42:52.137131884 +0000 UTC m=+6062.922271911" Nov 21 20:42:53 crc kubenswrapper[4701]: I1121 20:42:53.096295 4701 generic.go:334] "Generic (PLEG): container finished" podID="515b78f2-c72c-409d-8101-997b42623e59" containerID="02baa1b875c3dc44bc4643bf1982c901c8651d8350ab181aec28f87ade6fa391" exitCode=0 Nov 21 20:42:53 crc kubenswrapper[4701]: I1121 20:42:53.096394 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-57ttz" event={"ID":"515b78f2-c72c-409d-8101-997b42623e59","Type":"ContainerDied","Data":"02baa1b875c3dc44bc4643bf1982c901c8651d8350ab181aec28f87ade6fa391"} Nov 21 20:42:54 crc kubenswrapper[4701]: I1121 20:42:54.013881 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-swkwz" Nov 21 20:42:54 crc kubenswrapper[4701]: I1121 20:42:54.013950 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-swkwz" Nov 21 20:42:54 crc kubenswrapper[4701]: I1121 20:42:54.111747 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-57ttz" event={"ID":"515b78f2-c72c-409d-8101-997b42623e59","Type":"ContainerStarted","Data":"17e5ef18d6f497c0a45c44c09dc99f44ae6f0975488c06b3cbfad4203e3e4ebd"} Nov 21 20:42:54 crc kubenswrapper[4701]: I1121 20:42:54.117627 4701 generic.go:334] "Generic (PLEG): container finished" podID="6410a611-8058-4bcd-9058-80e84ec073d5" containerID="13aff5644c8c7f0d7d82e750591221c996817a42668824b1dfa5544f9a98b6b7" exitCode=0 Nov 21 20:42:54 crc kubenswrapper[4701]: I1121 20:42:54.117703 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wr9nd" 
event={"ID":"6410a611-8058-4bcd-9058-80e84ec073d5","Type":"ContainerDied","Data":"13aff5644c8c7f0d7d82e750591221c996817a42668824b1dfa5544f9a98b6b7"} Nov 21 20:42:54 crc kubenswrapper[4701]: I1121 20:42:54.188232 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-57ttz" podStartSLOduration=3.700705376 podStartE2EDuration="6.18817737s" podCreationTimestamp="2025-11-21 20:42:48 +0000 UTC" firstStartedPulling="2025-11-21 20:42:51.022592669 +0000 UTC m=+6061.807732706" lastFinishedPulling="2025-11-21 20:42:53.510064643 +0000 UTC m=+6064.295204700" observedRunningTime="2025-11-21 20:42:54.137327505 +0000 UTC m=+6064.922467592" watchObservedRunningTime="2025-11-21 20:42:54.18817737 +0000 UTC m=+6064.973317417" Nov 21 20:42:55 crc kubenswrapper[4701]: I1121 20:42:55.080617 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-swkwz" podUID="57bd8418-d205-4455-ab33-d306e2833dc5" containerName="registry-server" probeResult="failure" output=< Nov 21 20:42:55 crc kubenswrapper[4701]: timeout: failed to connect service ":50051" within 1s Nov 21 20:42:55 crc kubenswrapper[4701]: > Nov 21 20:42:55 crc kubenswrapper[4701]: I1121 20:42:55.131670 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wr9nd" event={"ID":"6410a611-8058-4bcd-9058-80e84ec073d5","Type":"ContainerStarted","Data":"9f13476ba4433522c5e8f9e01873b235a5a1ec5de37f6c6008adea5657095a91"} Nov 21 20:42:55 crc kubenswrapper[4701]: I1121 20:42:55.160832 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-wr9nd" podStartSLOduration=3.646271476 podStartE2EDuration="7.160800744s" podCreationTimestamp="2025-11-21 20:42:48 +0000 UTC" firstStartedPulling="2025-11-21 20:42:51.039417997 +0000 UTC m=+6061.824558024" lastFinishedPulling="2025-11-21 20:42:54.553947225 +0000 UTC m=+6065.339087292" observedRunningTime="2025-11-21 20:42:55.153702394 +0000 UTC m=+6065.938842431" watchObservedRunningTime="2025-11-21 20:42:55.160800744 +0000 UTC m=+6065.945940781" Nov 21 20:42:58 crc kubenswrapper[4701]: I1121 20:42:58.619753 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-57ttz" Nov 21 20:42:58 crc kubenswrapper[4701]: I1121 20:42:58.620790 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-57ttz" Nov 21 20:42:58 crc kubenswrapper[4701]: I1121 20:42:58.701669 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-57ttz" Nov 21 20:42:59 crc kubenswrapper[4701]: I1121 20:42:59.197076 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-wr9nd" Nov 21 20:42:59 crc kubenswrapper[4701]: I1121 20:42:59.200693 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-wr9nd" Nov 21 20:42:59 crc kubenswrapper[4701]: I1121 20:42:59.256402 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-57ttz" Nov 21 20:42:59 crc kubenswrapper[4701]: I1121 20:42:59.282525 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-wr9nd" Nov 21 20:42:59 crc kubenswrapper[4701]: I1121 20:42:59.971401 4701 
scope.go:117] "RemoveContainer" containerID="a8ef73b4a01861450b05d5f0ee923d59212be6c48e5ab1447c10cf0a3199c233" Nov 21 20:42:59 crc kubenswrapper[4701]: E1121 20:42:59.971822 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:43:00 crc kubenswrapper[4701]: I1121 20:43:00.248097 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-57ttz"] Nov 21 20:43:00 crc kubenswrapper[4701]: I1121 20:43:00.287157 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-wr9nd" Nov 21 20:43:01 crc kubenswrapper[4701]: I1121 20:43:01.210735 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-57ttz" podUID="515b78f2-c72c-409d-8101-997b42623e59" containerName="registry-server" containerID="cri-o://17e5ef18d6f497c0a45c44c09dc99f44ae6f0975488c06b3cbfad4203e3e4ebd" gracePeriod=2 Nov 21 20:43:01 crc kubenswrapper[4701]: I1121 20:43:01.641582 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wr9nd"] Nov 21 20:43:01 crc kubenswrapper[4701]: I1121 20:43:01.711135 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-57ttz" Nov 21 20:43:01 crc kubenswrapper[4701]: I1121 20:43:01.820807 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/515b78f2-c72c-409d-8101-997b42623e59-catalog-content\") pod \"515b78f2-c72c-409d-8101-997b42623e59\" (UID: \"515b78f2-c72c-409d-8101-997b42623e59\") " Nov 21 20:43:01 crc kubenswrapper[4701]: I1121 20:43:01.820851 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/515b78f2-c72c-409d-8101-997b42623e59-utilities\") pod \"515b78f2-c72c-409d-8101-997b42623e59\" (UID: \"515b78f2-c72c-409d-8101-997b42623e59\") " Nov 21 20:43:01 crc kubenswrapper[4701]: I1121 20:43:01.821032 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jj7g5\" (UniqueName: \"kubernetes.io/projected/515b78f2-c72c-409d-8101-997b42623e59-kube-api-access-jj7g5\") pod \"515b78f2-c72c-409d-8101-997b42623e59\" (UID: \"515b78f2-c72c-409d-8101-997b42623e59\") " Nov 21 20:43:01 crc kubenswrapper[4701]: I1121 20:43:01.822010 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/515b78f2-c72c-409d-8101-997b42623e59-utilities" (OuterVolumeSpecName: "utilities") pod "515b78f2-c72c-409d-8101-997b42623e59" (UID: "515b78f2-c72c-409d-8101-997b42623e59"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:43:01 crc kubenswrapper[4701]: I1121 20:43:01.833570 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/515b78f2-c72c-409d-8101-997b42623e59-kube-api-access-jj7g5" (OuterVolumeSpecName: "kube-api-access-jj7g5") pod "515b78f2-c72c-409d-8101-997b42623e59" (UID: "515b78f2-c72c-409d-8101-997b42623e59"). InnerVolumeSpecName "kube-api-access-jj7g5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 20:43:01 crc kubenswrapper[4701]: I1121 20:43:01.850118 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/515b78f2-c72c-409d-8101-997b42623e59-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "515b78f2-c72c-409d-8101-997b42623e59" (UID: "515b78f2-c72c-409d-8101-997b42623e59"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:43:01 crc kubenswrapper[4701]: I1121 20:43:01.924300 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/515b78f2-c72c-409d-8101-997b42623e59-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 20:43:01 crc kubenswrapper[4701]: I1121 20:43:01.924351 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/515b78f2-c72c-409d-8101-997b42623e59-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 20:43:01 crc kubenswrapper[4701]: I1121 20:43:01.924364 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jj7g5\" (UniqueName: \"kubernetes.io/projected/515b78f2-c72c-409d-8101-997b42623e59-kube-api-access-jj7g5\") on node \"crc\" DevicePath \"\"" Nov 21 20:43:02 crc kubenswrapper[4701]: I1121 20:43:02.247383 4701 generic.go:334] "Generic (PLEG): container finished" podID="515b78f2-c72c-409d-8101-997b42623e59" containerID="17e5ef18d6f497c0a45c44c09dc99f44ae6f0975488c06b3cbfad4203e3e4ebd" exitCode=0 Nov 21 20:43:02 crc kubenswrapper[4701]: I1121 20:43:02.248879 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-57ttz" Nov 21 20:43:02 crc kubenswrapper[4701]: I1121 20:43:02.249773 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-57ttz" event={"ID":"515b78f2-c72c-409d-8101-997b42623e59","Type":"ContainerDied","Data":"17e5ef18d6f497c0a45c44c09dc99f44ae6f0975488c06b3cbfad4203e3e4ebd"} Nov 21 20:43:02 crc kubenswrapper[4701]: I1121 20:43:02.249809 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-57ttz" event={"ID":"515b78f2-c72c-409d-8101-997b42623e59","Type":"ContainerDied","Data":"57eab692f190097992d8de8a186ddcb95f6ab9caa75ae3e8d30a4e71fa264192"} Nov 21 20:43:02 crc kubenswrapper[4701]: I1121 20:43:02.249832 4701 scope.go:117] "RemoveContainer" containerID="17e5ef18d6f497c0a45c44c09dc99f44ae6f0975488c06b3cbfad4203e3e4ebd" Nov 21 20:43:02 crc kubenswrapper[4701]: I1121 20:43:02.326000 4701 scope.go:117] "RemoveContainer" containerID="02baa1b875c3dc44bc4643bf1982c901c8651d8350ab181aec28f87ade6fa391" Nov 21 20:43:02 crc kubenswrapper[4701]: I1121 20:43:02.343093 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-57ttz"] Nov 21 20:43:02 crc kubenswrapper[4701]: I1121 20:43:02.354171 4701 scope.go:117] "RemoveContainer" containerID="fc66608b4fd1d22c2977d97a58c0516a00c28a8a6170f3072f9846337c03cb14" Nov 21 20:43:02 crc kubenswrapper[4701]: I1121 20:43:02.357860 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-57ttz"] Nov 21 20:43:02 crc kubenswrapper[4701]: I1121 20:43:02.403331 4701 scope.go:117] "RemoveContainer" containerID="17e5ef18d6f497c0a45c44c09dc99f44ae6f0975488c06b3cbfad4203e3e4ebd" Nov 21 20:43:02 crc kubenswrapper[4701]: E1121 20:43:02.403908 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"17e5ef18d6f497c0a45c44c09dc99f44ae6f0975488c06b3cbfad4203e3e4ebd\": container with ID starting with 17e5ef18d6f497c0a45c44c09dc99f44ae6f0975488c06b3cbfad4203e3e4ebd not found: ID does not exist" containerID="17e5ef18d6f497c0a45c44c09dc99f44ae6f0975488c06b3cbfad4203e3e4ebd" Nov 21 20:43:02 crc kubenswrapper[4701]: I1121 20:43:02.403965 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"17e5ef18d6f497c0a45c44c09dc99f44ae6f0975488c06b3cbfad4203e3e4ebd"} err="failed to get container status \"17e5ef18d6f497c0a45c44c09dc99f44ae6f0975488c06b3cbfad4203e3e4ebd\": rpc error: code = NotFound desc = could not find container \"17e5ef18d6f497c0a45c44c09dc99f44ae6f0975488c06b3cbfad4203e3e4ebd\": container with ID starting with 17e5ef18d6f497c0a45c44c09dc99f44ae6f0975488c06b3cbfad4203e3e4ebd not found: ID does not exist" Nov 21 20:43:02 crc kubenswrapper[4701]: I1121 20:43:02.404008 4701 scope.go:117] "RemoveContainer" containerID="02baa1b875c3dc44bc4643bf1982c901c8651d8350ab181aec28f87ade6fa391" Nov 21 20:43:02 crc kubenswrapper[4701]: E1121 20:43:02.404486 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"02baa1b875c3dc44bc4643bf1982c901c8651d8350ab181aec28f87ade6fa391\": container with ID starting with 02baa1b875c3dc44bc4643bf1982c901c8651d8350ab181aec28f87ade6fa391 not found: ID does not exist" containerID="02baa1b875c3dc44bc4643bf1982c901c8651d8350ab181aec28f87ade6fa391" Nov 21 20:43:02 crc kubenswrapper[4701]: I1121 20:43:02.404525 4701 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"02baa1b875c3dc44bc4643bf1982c901c8651d8350ab181aec28f87ade6fa391"} err="failed to get container status \"02baa1b875c3dc44bc4643bf1982c901c8651d8350ab181aec28f87ade6fa391\": rpc error: code = NotFound desc = could not find container \"02baa1b875c3dc44bc4643bf1982c901c8651d8350ab181aec28f87ade6fa391\": container with ID starting with 02baa1b875c3dc44bc4643bf1982c901c8651d8350ab181aec28f87ade6fa391 not found: ID does not exist" Nov 21 20:43:02 crc kubenswrapper[4701]: I1121 20:43:02.404542 4701 scope.go:117] "RemoveContainer" containerID="fc66608b4fd1d22c2977d97a58c0516a00c28a8a6170f3072f9846337c03cb14" Nov 21 20:43:02 crc kubenswrapper[4701]: E1121 20:43:02.404787 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fc66608b4fd1d22c2977d97a58c0516a00c28a8a6170f3072f9846337c03cb14\": container with ID starting with fc66608b4fd1d22c2977d97a58c0516a00c28a8a6170f3072f9846337c03cb14 not found: ID does not exist" containerID="fc66608b4fd1d22c2977d97a58c0516a00c28a8a6170f3072f9846337c03cb14" Nov 21 20:43:02 crc kubenswrapper[4701]: I1121 20:43:02.404820 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fc66608b4fd1d22c2977d97a58c0516a00c28a8a6170f3072f9846337c03cb14"} err="failed to get container status \"fc66608b4fd1d22c2977d97a58c0516a00c28a8a6170f3072f9846337c03cb14\": rpc error: code = NotFound desc = could not find container \"fc66608b4fd1d22c2977d97a58c0516a00c28a8a6170f3072f9846337c03cb14\": container with ID starting with fc66608b4fd1d22c2977d97a58c0516a00c28a8a6170f3072f9846337c03cb14 not found: ID does not exist" Nov 21 20:43:03 crc kubenswrapper[4701]: I1121 20:43:03.265786 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-wr9nd" podUID="6410a611-8058-4bcd-9058-80e84ec073d5" containerName="registry-server" containerID="cri-o://9f13476ba4433522c5e8f9e01873b235a5a1ec5de37f6c6008adea5657095a91" gracePeriod=2 Nov 21 20:43:03 crc kubenswrapper[4701]: I1121 20:43:03.837129 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wr9nd" Nov 21 20:43:03 crc kubenswrapper[4701]: I1121 20:43:03.964780 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="515b78f2-c72c-409d-8101-997b42623e59" path="/var/lib/kubelet/pods/515b78f2-c72c-409d-8101-997b42623e59/volumes" Nov 21 20:43:03 crc kubenswrapper[4701]: I1121 20:43:03.982994 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vrmvz\" (UniqueName: \"kubernetes.io/projected/6410a611-8058-4bcd-9058-80e84ec073d5-kube-api-access-vrmvz\") pod \"6410a611-8058-4bcd-9058-80e84ec073d5\" (UID: \"6410a611-8058-4bcd-9058-80e84ec073d5\") " Nov 21 20:43:03 crc kubenswrapper[4701]: I1121 20:43:03.983082 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6410a611-8058-4bcd-9058-80e84ec073d5-utilities\") pod \"6410a611-8058-4bcd-9058-80e84ec073d5\" (UID: \"6410a611-8058-4bcd-9058-80e84ec073d5\") " Nov 21 20:43:03 crc kubenswrapper[4701]: I1121 20:43:03.983296 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6410a611-8058-4bcd-9058-80e84ec073d5-catalog-content\") pod \"6410a611-8058-4bcd-9058-80e84ec073d5\" (UID: \"6410a611-8058-4bcd-9058-80e84ec073d5\") " Nov 21 20:43:03 crc kubenswrapper[4701]: I1121 20:43:03.984675 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6410a611-8058-4bcd-9058-80e84ec073d5-utilities" (OuterVolumeSpecName: "utilities") pod "6410a611-8058-4bcd-9058-80e84ec073d5" (UID: "6410a611-8058-4bcd-9058-80e84ec073d5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:43:03 crc kubenswrapper[4701]: I1121 20:43:03.994793 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6410a611-8058-4bcd-9058-80e84ec073d5-kube-api-access-vrmvz" (OuterVolumeSpecName: "kube-api-access-vrmvz") pod "6410a611-8058-4bcd-9058-80e84ec073d5" (UID: "6410a611-8058-4bcd-9058-80e84ec073d5"). InnerVolumeSpecName "kube-api-access-vrmvz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 20:43:04 crc kubenswrapper[4701]: I1121 20:43:04.067439 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6410a611-8058-4bcd-9058-80e84ec073d5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6410a611-8058-4bcd-9058-80e84ec073d5" (UID: "6410a611-8058-4bcd-9058-80e84ec073d5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:43:04 crc kubenswrapper[4701]: I1121 20:43:04.087993 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vrmvz\" (UniqueName: \"kubernetes.io/projected/6410a611-8058-4bcd-9058-80e84ec073d5-kube-api-access-vrmvz\") on node \"crc\" DevicePath \"\"" Nov 21 20:43:04 crc kubenswrapper[4701]: I1121 20:43:04.088069 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6410a611-8058-4bcd-9058-80e84ec073d5-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 20:43:04 crc kubenswrapper[4701]: I1121 20:43:04.088089 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6410a611-8058-4bcd-9058-80e84ec073d5-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 20:43:04 crc kubenswrapper[4701]: I1121 20:43:04.275972 4701 generic.go:334] "Generic (PLEG): container finished" podID="6410a611-8058-4bcd-9058-80e84ec073d5" containerID="9f13476ba4433522c5e8f9e01873b235a5a1ec5de37f6c6008adea5657095a91" exitCode=0 Nov 21 20:43:04 crc kubenswrapper[4701]: I1121 20:43:04.276025 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wr9nd" event={"ID":"6410a611-8058-4bcd-9058-80e84ec073d5","Type":"ContainerDied","Data":"9f13476ba4433522c5e8f9e01873b235a5a1ec5de37f6c6008adea5657095a91"} Nov 21 20:43:04 crc kubenswrapper[4701]: I1121 20:43:04.276065 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wr9nd" event={"ID":"6410a611-8058-4bcd-9058-80e84ec073d5","Type":"ContainerDied","Data":"cc819dd9636b6a199e103f3081452c3607873af64bae3dee180fe747ed9dd6cc"} Nov 21 20:43:04 crc kubenswrapper[4701]: I1121 20:43:04.276070 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wr9nd" Nov 21 20:43:04 crc kubenswrapper[4701]: I1121 20:43:04.276083 4701 scope.go:117] "RemoveContainer" containerID="9f13476ba4433522c5e8f9e01873b235a5a1ec5de37f6c6008adea5657095a91" Nov 21 20:43:04 crc kubenswrapper[4701]: I1121 20:43:04.301383 4701 scope.go:117] "RemoveContainer" containerID="13aff5644c8c7f0d7d82e750591221c996817a42668824b1dfa5544f9a98b6b7" Nov 21 20:43:04 crc kubenswrapper[4701]: I1121 20:43:04.321125 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wr9nd"] Nov 21 20:43:04 crc kubenswrapper[4701]: I1121 20:43:04.337939 4701 scope.go:117] "RemoveContainer" containerID="74c4dc1d7fe17c8bc6f4e87fa004965eadd245eed26222be3783b661d5529cb4" Nov 21 20:43:04 crc kubenswrapper[4701]: I1121 20:43:04.342946 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-wr9nd"] Nov 21 20:43:04 crc kubenswrapper[4701]: I1121 20:43:04.382533 4701 scope.go:117] "RemoveContainer" containerID="9f13476ba4433522c5e8f9e01873b235a5a1ec5de37f6c6008adea5657095a91" Nov 21 20:43:04 crc kubenswrapper[4701]: E1121 20:43:04.383700 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9f13476ba4433522c5e8f9e01873b235a5a1ec5de37f6c6008adea5657095a91\": container with ID starting with 9f13476ba4433522c5e8f9e01873b235a5a1ec5de37f6c6008adea5657095a91 not found: ID does not exist" containerID="9f13476ba4433522c5e8f9e01873b235a5a1ec5de37f6c6008adea5657095a91" Nov 21 20:43:04 crc kubenswrapper[4701]: I1121 20:43:04.383745 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f13476ba4433522c5e8f9e01873b235a5a1ec5de37f6c6008adea5657095a91"} err="failed to get container status \"9f13476ba4433522c5e8f9e01873b235a5a1ec5de37f6c6008adea5657095a91\": rpc error: code = NotFound desc = could not find container \"9f13476ba4433522c5e8f9e01873b235a5a1ec5de37f6c6008adea5657095a91\": container with ID starting with 9f13476ba4433522c5e8f9e01873b235a5a1ec5de37f6c6008adea5657095a91 not found: ID does not exist" Nov 21 20:43:04 crc kubenswrapper[4701]: I1121 20:43:04.383773 4701 scope.go:117] "RemoveContainer" containerID="13aff5644c8c7f0d7d82e750591221c996817a42668824b1dfa5544f9a98b6b7" Nov 21 20:43:04 crc kubenswrapper[4701]: E1121 20:43:04.384074 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"13aff5644c8c7f0d7d82e750591221c996817a42668824b1dfa5544f9a98b6b7\": container with ID starting with 13aff5644c8c7f0d7d82e750591221c996817a42668824b1dfa5544f9a98b6b7 not found: ID does not exist" containerID="13aff5644c8c7f0d7d82e750591221c996817a42668824b1dfa5544f9a98b6b7" Nov 21 20:43:04 crc kubenswrapper[4701]: I1121 20:43:04.384106 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"13aff5644c8c7f0d7d82e750591221c996817a42668824b1dfa5544f9a98b6b7"} err="failed to get container status \"13aff5644c8c7f0d7d82e750591221c996817a42668824b1dfa5544f9a98b6b7\": rpc error: code = NotFound desc = could not find container \"13aff5644c8c7f0d7d82e750591221c996817a42668824b1dfa5544f9a98b6b7\": container with ID starting with 13aff5644c8c7f0d7d82e750591221c996817a42668824b1dfa5544f9a98b6b7 not found: ID does not exist" Nov 21 20:43:04 crc kubenswrapper[4701]: I1121 20:43:04.384125 4701 scope.go:117] "RemoveContainer" 
containerID="74c4dc1d7fe17c8bc6f4e87fa004965eadd245eed26222be3783b661d5529cb4" Nov 21 20:43:04 crc kubenswrapper[4701]: E1121 20:43:04.384426 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"74c4dc1d7fe17c8bc6f4e87fa004965eadd245eed26222be3783b661d5529cb4\": container with ID starting with 74c4dc1d7fe17c8bc6f4e87fa004965eadd245eed26222be3783b661d5529cb4 not found: ID does not exist" containerID="74c4dc1d7fe17c8bc6f4e87fa004965eadd245eed26222be3783b661d5529cb4" Nov 21 20:43:04 crc kubenswrapper[4701]: I1121 20:43:04.384447 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74c4dc1d7fe17c8bc6f4e87fa004965eadd245eed26222be3783b661d5529cb4"} err="failed to get container status \"74c4dc1d7fe17c8bc6f4e87fa004965eadd245eed26222be3783b661d5529cb4\": rpc error: code = NotFound desc = could not find container \"74c4dc1d7fe17c8bc6f4e87fa004965eadd245eed26222be3783b661d5529cb4\": container with ID starting with 74c4dc1d7fe17c8bc6f4e87fa004965eadd245eed26222be3783b661d5529cb4 not found: ID does not exist" Nov 21 20:43:05 crc kubenswrapper[4701]: I1121 20:43:05.079582 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-swkwz" podUID="57bd8418-d205-4455-ab33-d306e2833dc5" containerName="registry-server" probeResult="failure" output=< Nov 21 20:43:05 crc kubenswrapper[4701]: timeout: failed to connect service ":50051" within 1s Nov 21 20:43:05 crc kubenswrapper[4701]: > Nov 21 20:43:05 crc kubenswrapper[4701]: I1121 20:43:05.966615 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6410a611-8058-4bcd-9058-80e84ec073d5" path="/var/lib/kubelet/pods/6410a611-8058-4bcd-9058-80e84ec073d5/volumes" Nov 21 20:43:11 crc kubenswrapper[4701]: I1121 20:43:11.952016 4701 scope.go:117] "RemoveContainer" containerID="a8ef73b4a01861450b05d5f0ee923d59212be6c48e5ab1447c10cf0a3199c233" Nov 21 20:43:11 crc kubenswrapper[4701]: E1121 20:43:11.953783 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:43:14 crc kubenswrapper[4701]: I1121 20:43:14.106629 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-swkwz" Nov 21 20:43:14 crc kubenswrapper[4701]: I1121 20:43:14.183985 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-swkwz" Nov 21 20:43:14 crc kubenswrapper[4701]: I1121 20:43:14.885203 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-swkwz"] Nov 21 20:43:15 crc kubenswrapper[4701]: I1121 20:43:15.443189 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-swkwz" podUID="57bd8418-d205-4455-ab33-d306e2833dc5" containerName="registry-server" containerID="cri-o://6b6708f573b7ebd8f9e9d2ada43207be1c5880f744dc9142c8985f1598f3ab7a" gracePeriod=2 Nov 21 20:43:16 crc kubenswrapper[4701]: I1121 20:43:16.456988 4701 generic.go:334] "Generic (PLEG): container finished" podID="57bd8418-d205-4455-ab33-d306e2833dc5" 
containerID="6b6708f573b7ebd8f9e9d2ada43207be1c5880f744dc9142c8985f1598f3ab7a" exitCode=0 Nov 21 20:43:16 crc kubenswrapper[4701]: I1121 20:43:16.457260 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-swkwz" event={"ID":"57bd8418-d205-4455-ab33-d306e2833dc5","Type":"ContainerDied","Data":"6b6708f573b7ebd8f9e9d2ada43207be1c5880f744dc9142c8985f1598f3ab7a"} Nov 21 20:43:16 crc kubenswrapper[4701]: I1121 20:43:16.576942 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-swkwz" Nov 21 20:43:16 crc kubenswrapper[4701]: I1121 20:43:16.674566 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7zhxh\" (UniqueName: \"kubernetes.io/projected/57bd8418-d205-4455-ab33-d306e2833dc5-kube-api-access-7zhxh\") pod \"57bd8418-d205-4455-ab33-d306e2833dc5\" (UID: \"57bd8418-d205-4455-ab33-d306e2833dc5\") " Nov 21 20:43:16 crc kubenswrapper[4701]: I1121 20:43:16.675107 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57bd8418-d205-4455-ab33-d306e2833dc5-catalog-content\") pod \"57bd8418-d205-4455-ab33-d306e2833dc5\" (UID: \"57bd8418-d205-4455-ab33-d306e2833dc5\") " Nov 21 20:43:16 crc kubenswrapper[4701]: I1121 20:43:16.675163 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57bd8418-d205-4455-ab33-d306e2833dc5-utilities\") pod \"57bd8418-d205-4455-ab33-d306e2833dc5\" (UID: \"57bd8418-d205-4455-ab33-d306e2833dc5\") " Nov 21 20:43:16 crc kubenswrapper[4701]: I1121 20:43:16.676373 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57bd8418-d205-4455-ab33-d306e2833dc5-utilities" (OuterVolumeSpecName: "utilities") pod "57bd8418-d205-4455-ab33-d306e2833dc5" (UID: "57bd8418-d205-4455-ab33-d306e2833dc5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:43:16 crc kubenswrapper[4701]: I1121 20:43:16.677082 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57bd8418-d205-4455-ab33-d306e2833dc5-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 20:43:16 crc kubenswrapper[4701]: I1121 20:43:16.680539 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57bd8418-d205-4455-ab33-d306e2833dc5-kube-api-access-7zhxh" (OuterVolumeSpecName: "kube-api-access-7zhxh") pod "57bd8418-d205-4455-ab33-d306e2833dc5" (UID: "57bd8418-d205-4455-ab33-d306e2833dc5"). InnerVolumeSpecName "kube-api-access-7zhxh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 20:43:16 crc kubenswrapper[4701]: I1121 20:43:16.778296 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7zhxh\" (UniqueName: \"kubernetes.io/projected/57bd8418-d205-4455-ab33-d306e2833dc5-kube-api-access-7zhxh\") on node \"crc\" DevicePath \"\"" Nov 21 20:43:16 crc kubenswrapper[4701]: I1121 20:43:16.780936 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57bd8418-d205-4455-ab33-d306e2833dc5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57bd8418-d205-4455-ab33-d306e2833dc5" (UID: "57bd8418-d205-4455-ab33-d306e2833dc5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:43:16 crc kubenswrapper[4701]: I1121 20:43:16.879711 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57bd8418-d205-4455-ab33-d306e2833dc5-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 20:43:17 crc kubenswrapper[4701]: I1121 20:43:17.470574 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-swkwz" event={"ID":"57bd8418-d205-4455-ab33-d306e2833dc5","Type":"ContainerDied","Data":"e76fe1062861d78f97f968bc67d3d59ff0180bbaf147bd8d298abf11dfb6ffd6"} Nov 21 20:43:17 crc kubenswrapper[4701]: I1121 20:43:17.470640 4701 scope.go:117] "RemoveContainer" containerID="6b6708f573b7ebd8f9e9d2ada43207be1c5880f744dc9142c8985f1598f3ab7a" Nov 21 20:43:17 crc kubenswrapper[4701]: I1121 20:43:17.470647 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-swkwz" Nov 21 20:43:17 crc kubenswrapper[4701]: I1121 20:43:17.493789 4701 scope.go:117] "RemoveContainer" containerID="dcb7cc2aa7b12534c6f6493e39adbb011a064c99bbd8816eeadb9b4fec8ee383" Nov 21 20:43:17 crc kubenswrapper[4701]: I1121 20:43:17.518553 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-swkwz"] Nov 21 20:43:17 crc kubenswrapper[4701]: I1121 20:43:17.531461 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-swkwz"] Nov 21 20:43:17 crc kubenswrapper[4701]: I1121 20:43:17.531829 4701 scope.go:117] "RemoveContainer" containerID="1f2883d6c5d3a583ac4bb6d1e55cd952679720337a0bdf8f9b10fa9d2f69ad0b" Nov 21 20:43:17 crc kubenswrapper[4701]: I1121 20:43:17.967556 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57bd8418-d205-4455-ab33-d306e2833dc5" path="/var/lib/kubelet/pods/57bd8418-d205-4455-ab33-d306e2833dc5/volumes" Nov 21 20:43:23 crc kubenswrapper[4701]: I1121 20:43:23.951850 4701 scope.go:117] "RemoveContainer" containerID="a8ef73b4a01861450b05d5f0ee923d59212be6c48e5ab1447c10cf0a3199c233" Nov 21 20:43:24 crc kubenswrapper[4701]: I1121 20:43:24.553273 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerStarted","Data":"61ed04b164680d4d9f55d5d2bc2dda8c52790bbd62e7733688bf5c8ae3f7d69f"} Nov 21 20:44:05 crc kubenswrapper[4701]: I1121 20:44:05.105938 4701 generic.go:334] "Generic (PLEG): container finished" podID="74b4c785-a4b8-4ac3-8906-0cb78c310784" containerID="0bd024b44216fe901914fac5f35fe5c15c4d4230c057eb480609d6c056bded4c" exitCode=0 Nov 21 20:44:05 crc kubenswrapper[4701]: I1121 20:44:05.106028 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-fspj5/must-gather-4xwmw" event={"ID":"74b4c785-a4b8-4ac3-8906-0cb78c310784","Type":"ContainerDied","Data":"0bd024b44216fe901914fac5f35fe5c15c4d4230c057eb480609d6c056bded4c"} Nov 21 20:44:05 crc kubenswrapper[4701]: I1121 20:44:05.108127 4701 scope.go:117] "RemoveContainer" containerID="0bd024b44216fe901914fac5f35fe5c15c4d4230c057eb480609d6c056bded4c" Nov 21 20:44:05 crc kubenswrapper[4701]: I1121 20:44:05.404705 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-fspj5_must-gather-4xwmw_74b4c785-a4b8-4ac3-8906-0cb78c310784/gather/0.log" Nov 21 20:44:08 crc kubenswrapper[4701]: E1121 20:44:08.142089 4701 upgradeaware.go:427] 
Error proxying data from client to backend: readfrom tcp 38.102.83.12:41426->38.102.83.12:39339: write tcp 38.102.83.12:41426->38.102.83.12:39339: write: broken pipe Nov 21 20:44:13 crc kubenswrapper[4701]: I1121 20:44:13.093295 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-gppm9"] Nov 21 20:44:13 crc kubenswrapper[4701]: E1121 20:44:13.094723 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57bd8418-d205-4455-ab33-d306e2833dc5" containerName="extract-content" Nov 21 20:44:13 crc kubenswrapper[4701]: I1121 20:44:13.094742 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="57bd8418-d205-4455-ab33-d306e2833dc5" containerName="extract-content" Nov 21 20:44:13 crc kubenswrapper[4701]: E1121 20:44:13.094767 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57bd8418-d205-4455-ab33-d306e2833dc5" containerName="extract-utilities" Nov 21 20:44:13 crc kubenswrapper[4701]: I1121 20:44:13.094775 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="57bd8418-d205-4455-ab33-d306e2833dc5" containerName="extract-utilities" Nov 21 20:44:13 crc kubenswrapper[4701]: E1121 20:44:13.094787 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="515b78f2-c72c-409d-8101-997b42623e59" containerName="extract-utilities" Nov 21 20:44:13 crc kubenswrapper[4701]: I1121 20:44:13.094797 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="515b78f2-c72c-409d-8101-997b42623e59" containerName="extract-utilities" Nov 21 20:44:13 crc kubenswrapper[4701]: E1121 20:44:13.094816 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="515b78f2-c72c-409d-8101-997b42623e59" containerName="extract-content" Nov 21 20:44:13 crc kubenswrapper[4701]: I1121 20:44:13.094824 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="515b78f2-c72c-409d-8101-997b42623e59" containerName="extract-content" Nov 21 20:44:13 crc kubenswrapper[4701]: E1121 20:44:13.094835 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6410a611-8058-4bcd-9058-80e84ec073d5" containerName="extract-content" Nov 21 20:44:13 crc kubenswrapper[4701]: I1121 20:44:13.094843 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="6410a611-8058-4bcd-9058-80e84ec073d5" containerName="extract-content" Nov 21 20:44:13 crc kubenswrapper[4701]: E1121 20:44:13.094869 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6410a611-8058-4bcd-9058-80e84ec073d5" containerName="extract-utilities" Nov 21 20:44:13 crc kubenswrapper[4701]: I1121 20:44:13.094878 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="6410a611-8058-4bcd-9058-80e84ec073d5" containerName="extract-utilities" Nov 21 20:44:13 crc kubenswrapper[4701]: E1121 20:44:13.094904 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57bd8418-d205-4455-ab33-d306e2833dc5" containerName="registry-server" Nov 21 20:44:13 crc kubenswrapper[4701]: I1121 20:44:13.094912 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="57bd8418-d205-4455-ab33-d306e2833dc5" containerName="registry-server" Nov 21 20:44:13 crc kubenswrapper[4701]: E1121 20:44:13.094934 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="515b78f2-c72c-409d-8101-997b42623e59" containerName="registry-server" Nov 21 20:44:13 crc kubenswrapper[4701]: I1121 20:44:13.094942 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="515b78f2-c72c-409d-8101-997b42623e59" containerName="registry-server" Nov 21 20:44:13 crc 
kubenswrapper[4701]: E1121 20:44:13.094958 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6410a611-8058-4bcd-9058-80e84ec073d5" containerName="registry-server" Nov 21 20:44:13 crc kubenswrapper[4701]: I1121 20:44:13.094966 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="6410a611-8058-4bcd-9058-80e84ec073d5" containerName="registry-server" Nov 21 20:44:13 crc kubenswrapper[4701]: I1121 20:44:13.095317 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="515b78f2-c72c-409d-8101-997b42623e59" containerName="registry-server" Nov 21 20:44:13 crc kubenswrapper[4701]: I1121 20:44:13.095344 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="6410a611-8058-4bcd-9058-80e84ec073d5" containerName="registry-server" Nov 21 20:44:13 crc kubenswrapper[4701]: I1121 20:44:13.095392 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="57bd8418-d205-4455-ab33-d306e2833dc5" containerName="registry-server" Nov 21 20:44:13 crc kubenswrapper[4701]: I1121 20:44:13.098659 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gppm9" Nov 21 20:44:13 crc kubenswrapper[4701]: I1121 20:44:13.112813 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-gppm9"] Nov 21 20:44:13 crc kubenswrapper[4701]: I1121 20:44:13.231594 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5108bcab-bfde-4295-8027-2db38c13eeab-utilities\") pod \"community-operators-gppm9\" (UID: \"5108bcab-bfde-4295-8027-2db38c13eeab\") " pod="openshift-marketplace/community-operators-gppm9" Nov 21 20:44:13 crc kubenswrapper[4701]: I1121 20:44:13.232008 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5108bcab-bfde-4295-8027-2db38c13eeab-catalog-content\") pod \"community-operators-gppm9\" (UID: \"5108bcab-bfde-4295-8027-2db38c13eeab\") " pod="openshift-marketplace/community-operators-gppm9" Nov 21 20:44:13 crc kubenswrapper[4701]: I1121 20:44:13.232154 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qnf8c\" (UniqueName: \"kubernetes.io/projected/5108bcab-bfde-4295-8027-2db38c13eeab-kube-api-access-qnf8c\") pod \"community-operators-gppm9\" (UID: \"5108bcab-bfde-4295-8027-2db38c13eeab\") " pod="openshift-marketplace/community-operators-gppm9" Nov 21 20:44:13 crc kubenswrapper[4701]: I1121 20:44:13.235000 4701 scope.go:117] "RemoveContainer" containerID="76f6f8205e8985f8472522d4f50867938a99271c700edec90145423a43b9af62" Nov 21 20:44:13 crc kubenswrapper[4701]: I1121 20:44:13.334889 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qnf8c\" (UniqueName: \"kubernetes.io/projected/5108bcab-bfde-4295-8027-2db38c13eeab-kube-api-access-qnf8c\") pod \"community-operators-gppm9\" (UID: \"5108bcab-bfde-4295-8027-2db38c13eeab\") " pod="openshift-marketplace/community-operators-gppm9" Nov 21 20:44:13 crc kubenswrapper[4701]: I1121 20:44:13.335116 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5108bcab-bfde-4295-8027-2db38c13eeab-utilities\") pod \"community-operators-gppm9\" (UID: \"5108bcab-bfde-4295-8027-2db38c13eeab\") " 
pod="openshift-marketplace/community-operators-gppm9" Nov 21 20:44:13 crc kubenswrapper[4701]: I1121 20:44:13.335317 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5108bcab-bfde-4295-8027-2db38c13eeab-catalog-content\") pod \"community-operators-gppm9\" (UID: \"5108bcab-bfde-4295-8027-2db38c13eeab\") " pod="openshift-marketplace/community-operators-gppm9" Nov 21 20:44:13 crc kubenswrapper[4701]: I1121 20:44:13.335712 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5108bcab-bfde-4295-8027-2db38c13eeab-utilities\") pod \"community-operators-gppm9\" (UID: \"5108bcab-bfde-4295-8027-2db38c13eeab\") " pod="openshift-marketplace/community-operators-gppm9" Nov 21 20:44:13 crc kubenswrapper[4701]: I1121 20:44:13.335722 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5108bcab-bfde-4295-8027-2db38c13eeab-catalog-content\") pod \"community-operators-gppm9\" (UID: \"5108bcab-bfde-4295-8027-2db38c13eeab\") " pod="openshift-marketplace/community-operators-gppm9" Nov 21 20:44:13 crc kubenswrapper[4701]: I1121 20:44:13.355874 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qnf8c\" (UniqueName: \"kubernetes.io/projected/5108bcab-bfde-4295-8027-2db38c13eeab-kube-api-access-qnf8c\") pod \"community-operators-gppm9\" (UID: \"5108bcab-bfde-4295-8027-2db38c13eeab\") " pod="openshift-marketplace/community-operators-gppm9" Nov 21 20:44:13 crc kubenswrapper[4701]: I1121 20:44:13.444436 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gppm9" Nov 21 20:44:14 crc kubenswrapper[4701]: I1121 20:44:14.033868 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-gppm9"] Nov 21 20:44:14 crc kubenswrapper[4701]: I1121 20:44:14.231067 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gppm9" event={"ID":"5108bcab-bfde-4295-8027-2db38c13eeab","Type":"ContainerStarted","Data":"b487b9f48097500e3c1ae912a3a6118fc685d8cbd2eb8fdf3c4a78399b95bbcc"} Nov 21 20:44:14 crc kubenswrapper[4701]: I1121 20:44:14.530878 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-fspj5/must-gather-4xwmw"] Nov 21 20:44:14 crc kubenswrapper[4701]: I1121 20:44:14.531465 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-fspj5/must-gather-4xwmw" podUID="74b4c785-a4b8-4ac3-8906-0cb78c310784" containerName="copy" containerID="cri-o://e9b1ff9923aac77509b3591b7ac949cc3883bf933afa4542e65345783276485e" gracePeriod=2 Nov 21 20:44:14 crc kubenswrapper[4701]: I1121 20:44:14.545727 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-fspj5/must-gather-4xwmw"] Nov 21 20:44:15 crc kubenswrapper[4701]: I1121 20:44:15.014427 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-fspj5_must-gather-4xwmw_74b4c785-a4b8-4ac3-8906-0cb78c310784/copy/0.log" Nov 21 20:44:15 crc kubenswrapper[4701]: I1121 20:44:15.015381 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-fspj5/must-gather-4xwmw" Nov 21 20:44:15 crc kubenswrapper[4701]: I1121 20:44:15.083319 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/74b4c785-a4b8-4ac3-8906-0cb78c310784-must-gather-output\") pod \"74b4c785-a4b8-4ac3-8906-0cb78c310784\" (UID: \"74b4c785-a4b8-4ac3-8906-0cb78c310784\") " Nov 21 20:44:15 crc kubenswrapper[4701]: I1121 20:44:15.083413 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j9qww\" (UniqueName: \"kubernetes.io/projected/74b4c785-a4b8-4ac3-8906-0cb78c310784-kube-api-access-j9qww\") pod \"74b4c785-a4b8-4ac3-8906-0cb78c310784\" (UID: \"74b4c785-a4b8-4ac3-8906-0cb78c310784\") " Nov 21 20:44:15 crc kubenswrapper[4701]: I1121 20:44:15.090457 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/74b4c785-a4b8-4ac3-8906-0cb78c310784-kube-api-access-j9qww" (OuterVolumeSpecName: "kube-api-access-j9qww") pod "74b4c785-a4b8-4ac3-8906-0cb78c310784" (UID: "74b4c785-a4b8-4ac3-8906-0cb78c310784"). InnerVolumeSpecName "kube-api-access-j9qww". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 20:44:15 crc kubenswrapper[4701]: I1121 20:44:15.186577 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j9qww\" (UniqueName: \"kubernetes.io/projected/74b4c785-a4b8-4ac3-8906-0cb78c310784-kube-api-access-j9qww\") on node \"crc\" DevicePath \"\"" Nov 21 20:44:15 crc kubenswrapper[4701]: I1121 20:44:15.243979 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-fspj5_must-gather-4xwmw_74b4c785-a4b8-4ac3-8906-0cb78c310784/copy/0.log" Nov 21 20:44:15 crc kubenswrapper[4701]: I1121 20:44:15.244328 4701 generic.go:334] "Generic (PLEG): container finished" podID="74b4c785-a4b8-4ac3-8906-0cb78c310784" containerID="e9b1ff9923aac77509b3591b7ac949cc3883bf933afa4542e65345783276485e" exitCode=143 Nov 21 20:44:15 crc kubenswrapper[4701]: I1121 20:44:15.244398 4701 scope.go:117] "RemoveContainer" containerID="e9b1ff9923aac77509b3591b7ac949cc3883bf933afa4542e65345783276485e" Nov 21 20:44:15 crc kubenswrapper[4701]: I1121 20:44:15.244523 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-fspj5/must-gather-4xwmw" Nov 21 20:44:15 crc kubenswrapper[4701]: I1121 20:44:15.248971 4701 generic.go:334] "Generic (PLEG): container finished" podID="5108bcab-bfde-4295-8027-2db38c13eeab" containerID="ce35091fa724bfc48595f835a0f45dc188f875945cd87a9244aad7253f41ed16" exitCode=0 Nov 21 20:44:15 crc kubenswrapper[4701]: I1121 20:44:15.249020 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gppm9" event={"ID":"5108bcab-bfde-4295-8027-2db38c13eeab","Type":"ContainerDied","Data":"ce35091fa724bfc48595f835a0f45dc188f875945cd87a9244aad7253f41ed16"} Nov 21 20:44:15 crc kubenswrapper[4701]: I1121 20:44:15.278473 4701 scope.go:117] "RemoveContainer" containerID="0bd024b44216fe901914fac5f35fe5c15c4d4230c057eb480609d6c056bded4c" Nov 21 20:44:15 crc kubenswrapper[4701]: I1121 20:44:15.280333 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/74b4c785-a4b8-4ac3-8906-0cb78c310784-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "74b4c785-a4b8-4ac3-8906-0cb78c310784" (UID: "74b4c785-a4b8-4ac3-8906-0cb78c310784"). 
InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:44:15 crc kubenswrapper[4701]: I1121 20:44:15.290262 4701 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/74b4c785-a4b8-4ac3-8906-0cb78c310784-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 21 20:44:15 crc kubenswrapper[4701]: I1121 20:44:15.326079 4701 scope.go:117] "RemoveContainer" containerID="e9b1ff9923aac77509b3591b7ac949cc3883bf933afa4542e65345783276485e" Nov 21 20:44:15 crc kubenswrapper[4701]: E1121 20:44:15.333753 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e9b1ff9923aac77509b3591b7ac949cc3883bf933afa4542e65345783276485e\": container with ID starting with e9b1ff9923aac77509b3591b7ac949cc3883bf933afa4542e65345783276485e not found: ID does not exist" containerID="e9b1ff9923aac77509b3591b7ac949cc3883bf933afa4542e65345783276485e" Nov 21 20:44:15 crc kubenswrapper[4701]: I1121 20:44:15.333886 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e9b1ff9923aac77509b3591b7ac949cc3883bf933afa4542e65345783276485e"} err="failed to get container status \"e9b1ff9923aac77509b3591b7ac949cc3883bf933afa4542e65345783276485e\": rpc error: code = NotFound desc = could not find container \"e9b1ff9923aac77509b3591b7ac949cc3883bf933afa4542e65345783276485e\": container with ID starting with e9b1ff9923aac77509b3591b7ac949cc3883bf933afa4542e65345783276485e not found: ID does not exist" Nov 21 20:44:15 crc kubenswrapper[4701]: I1121 20:44:15.333997 4701 scope.go:117] "RemoveContainer" containerID="0bd024b44216fe901914fac5f35fe5c15c4d4230c057eb480609d6c056bded4c" Nov 21 20:44:15 crc kubenswrapper[4701]: E1121 20:44:15.335581 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0bd024b44216fe901914fac5f35fe5c15c4d4230c057eb480609d6c056bded4c\": container with ID starting with 0bd024b44216fe901914fac5f35fe5c15c4d4230c057eb480609d6c056bded4c not found: ID does not exist" containerID="0bd024b44216fe901914fac5f35fe5c15c4d4230c057eb480609d6c056bded4c" Nov 21 20:44:15 crc kubenswrapper[4701]: I1121 20:44:15.335659 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0bd024b44216fe901914fac5f35fe5c15c4d4230c057eb480609d6c056bded4c"} err="failed to get container status \"0bd024b44216fe901914fac5f35fe5c15c4d4230c057eb480609d6c056bded4c\": rpc error: code = NotFound desc = could not find container \"0bd024b44216fe901914fac5f35fe5c15c4d4230c057eb480609d6c056bded4c\": container with ID starting with 0bd024b44216fe901914fac5f35fe5c15c4d4230c057eb480609d6c056bded4c not found: ID does not exist" Nov 21 20:44:15 crc kubenswrapper[4701]: I1121 20:44:15.962404 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="74b4c785-a4b8-4ac3-8906-0cb78c310784" path="/var/lib/kubelet/pods/74b4c785-a4b8-4ac3-8906-0cb78c310784/volumes" Nov 21 20:44:16 crc kubenswrapper[4701]: I1121 20:44:16.269051 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gppm9" event={"ID":"5108bcab-bfde-4295-8027-2db38c13eeab","Type":"ContainerStarted","Data":"d71902364a8d43207c02bb7576167ed5fc190d005e5b646f6048b75a1c60356d"} Nov 21 20:44:18 crc kubenswrapper[4701]: I1121 20:44:18.295618 4701 generic.go:334] "Generic (PLEG): container finished" 
podID="5108bcab-bfde-4295-8027-2db38c13eeab" containerID="d71902364a8d43207c02bb7576167ed5fc190d005e5b646f6048b75a1c60356d" exitCode=0 Nov 21 20:44:18 crc kubenswrapper[4701]: I1121 20:44:18.295684 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gppm9" event={"ID":"5108bcab-bfde-4295-8027-2db38c13eeab","Type":"ContainerDied","Data":"d71902364a8d43207c02bb7576167ed5fc190d005e5b646f6048b75a1c60356d"} Nov 21 20:44:19 crc kubenswrapper[4701]: I1121 20:44:19.307018 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gppm9" event={"ID":"5108bcab-bfde-4295-8027-2db38c13eeab","Type":"ContainerStarted","Data":"265bc17cc207fbb5ecfacf5ec1c9c449fde96dc2e317e8e47d53ad12f9734f52"} Nov 21 20:44:19 crc kubenswrapper[4701]: I1121 20:44:19.334948 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-gppm9" podStartSLOduration=2.906789684 podStartE2EDuration="6.33492658s" podCreationTimestamp="2025-11-21 20:44:13 +0000 UTC" firstStartedPulling="2025-11-21 20:44:15.253820176 +0000 UTC m=+6146.038960203" lastFinishedPulling="2025-11-21 20:44:18.681957072 +0000 UTC m=+6149.467097099" observedRunningTime="2025-11-21 20:44:19.327357988 +0000 UTC m=+6150.112498025" watchObservedRunningTime="2025-11-21 20:44:19.33492658 +0000 UTC m=+6150.120066597" Nov 21 20:44:23 crc kubenswrapper[4701]: I1121 20:44:23.445371 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-gppm9" Nov 21 20:44:23 crc kubenswrapper[4701]: I1121 20:44:23.448432 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-gppm9" Nov 21 20:44:23 crc kubenswrapper[4701]: I1121 20:44:23.513824 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-gppm9" Nov 21 20:44:24 crc kubenswrapper[4701]: I1121 20:44:24.452056 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-gppm9" Nov 21 20:44:27 crc kubenswrapper[4701]: I1121 20:44:27.169690 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-gppm9"] Nov 21 20:44:27 crc kubenswrapper[4701]: I1121 20:44:27.411180 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-gppm9" podUID="5108bcab-bfde-4295-8027-2db38c13eeab" containerName="registry-server" containerID="cri-o://265bc17cc207fbb5ecfacf5ec1c9c449fde96dc2e317e8e47d53ad12f9734f52" gracePeriod=2 Nov 21 20:44:28 crc kubenswrapper[4701]: I1121 20:44:28.133320 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-gppm9" Nov 21 20:44:28 crc kubenswrapper[4701]: I1121 20:44:28.238006 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5108bcab-bfde-4295-8027-2db38c13eeab-catalog-content\") pod \"5108bcab-bfde-4295-8027-2db38c13eeab\" (UID: \"5108bcab-bfde-4295-8027-2db38c13eeab\") " Nov 21 20:44:28 crc kubenswrapper[4701]: I1121 20:44:28.238251 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5108bcab-bfde-4295-8027-2db38c13eeab-utilities\") pod \"5108bcab-bfde-4295-8027-2db38c13eeab\" (UID: \"5108bcab-bfde-4295-8027-2db38c13eeab\") " Nov 21 20:44:28 crc kubenswrapper[4701]: I1121 20:44:28.238642 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qnf8c\" (UniqueName: \"kubernetes.io/projected/5108bcab-bfde-4295-8027-2db38c13eeab-kube-api-access-qnf8c\") pod \"5108bcab-bfde-4295-8027-2db38c13eeab\" (UID: \"5108bcab-bfde-4295-8027-2db38c13eeab\") " Nov 21 20:44:28 crc kubenswrapper[4701]: I1121 20:44:28.238957 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5108bcab-bfde-4295-8027-2db38c13eeab-utilities" (OuterVolumeSpecName: "utilities") pod "5108bcab-bfde-4295-8027-2db38c13eeab" (UID: "5108bcab-bfde-4295-8027-2db38c13eeab"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:44:28 crc kubenswrapper[4701]: I1121 20:44:28.239624 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5108bcab-bfde-4295-8027-2db38c13eeab-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 20:44:28 crc kubenswrapper[4701]: I1121 20:44:28.245385 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5108bcab-bfde-4295-8027-2db38c13eeab-kube-api-access-qnf8c" (OuterVolumeSpecName: "kube-api-access-qnf8c") pod "5108bcab-bfde-4295-8027-2db38c13eeab" (UID: "5108bcab-bfde-4295-8027-2db38c13eeab"). InnerVolumeSpecName "kube-api-access-qnf8c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 20:44:28 crc kubenswrapper[4701]: I1121 20:44:28.288239 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5108bcab-bfde-4295-8027-2db38c13eeab-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5108bcab-bfde-4295-8027-2db38c13eeab" (UID: "5108bcab-bfde-4295-8027-2db38c13eeab"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:44:28 crc kubenswrapper[4701]: I1121 20:44:28.340969 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5108bcab-bfde-4295-8027-2db38c13eeab-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 20:44:28 crc kubenswrapper[4701]: I1121 20:44:28.341005 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qnf8c\" (UniqueName: \"kubernetes.io/projected/5108bcab-bfde-4295-8027-2db38c13eeab-kube-api-access-qnf8c\") on node \"crc\" DevicePath \"\"" Nov 21 20:44:28 crc kubenswrapper[4701]: I1121 20:44:28.424605 4701 generic.go:334] "Generic (PLEG): container finished" podID="5108bcab-bfde-4295-8027-2db38c13eeab" containerID="265bc17cc207fbb5ecfacf5ec1c9c449fde96dc2e317e8e47d53ad12f9734f52" exitCode=0 Nov 21 20:44:28 crc kubenswrapper[4701]: I1121 20:44:28.424667 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gppm9" event={"ID":"5108bcab-bfde-4295-8027-2db38c13eeab","Type":"ContainerDied","Data":"265bc17cc207fbb5ecfacf5ec1c9c449fde96dc2e317e8e47d53ad12f9734f52"} Nov 21 20:44:28 crc kubenswrapper[4701]: I1121 20:44:28.424722 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gppm9" Nov 21 20:44:28 crc kubenswrapper[4701]: I1121 20:44:28.424741 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gppm9" event={"ID":"5108bcab-bfde-4295-8027-2db38c13eeab","Type":"ContainerDied","Data":"b487b9f48097500e3c1ae912a3a6118fc685d8cbd2eb8fdf3c4a78399b95bbcc"} Nov 21 20:44:28 crc kubenswrapper[4701]: I1121 20:44:28.424763 4701 scope.go:117] "RemoveContainer" containerID="265bc17cc207fbb5ecfacf5ec1c9c449fde96dc2e317e8e47d53ad12f9734f52" Nov 21 20:44:28 crc kubenswrapper[4701]: I1121 20:44:28.448519 4701 scope.go:117] "RemoveContainer" containerID="d71902364a8d43207c02bb7576167ed5fc190d005e5b646f6048b75a1c60356d" Nov 21 20:44:28 crc kubenswrapper[4701]: I1121 20:44:28.479169 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-gppm9"] Nov 21 20:44:28 crc kubenswrapper[4701]: I1121 20:44:28.493410 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-gppm9"] Nov 21 20:44:28 crc kubenswrapper[4701]: I1121 20:44:28.504908 4701 scope.go:117] "RemoveContainer" containerID="ce35091fa724bfc48595f835a0f45dc188f875945cd87a9244aad7253f41ed16" Nov 21 20:44:28 crc kubenswrapper[4701]: I1121 20:44:28.554161 4701 scope.go:117] "RemoveContainer" containerID="265bc17cc207fbb5ecfacf5ec1c9c449fde96dc2e317e8e47d53ad12f9734f52" Nov 21 20:44:28 crc kubenswrapper[4701]: E1121 20:44:28.561749 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"265bc17cc207fbb5ecfacf5ec1c9c449fde96dc2e317e8e47d53ad12f9734f52\": container with ID starting with 265bc17cc207fbb5ecfacf5ec1c9c449fde96dc2e317e8e47d53ad12f9734f52 not found: ID does not exist" containerID="265bc17cc207fbb5ecfacf5ec1c9c449fde96dc2e317e8e47d53ad12f9734f52" Nov 21 20:44:28 crc kubenswrapper[4701]: I1121 20:44:28.561875 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"265bc17cc207fbb5ecfacf5ec1c9c449fde96dc2e317e8e47d53ad12f9734f52"} err="failed to get container status 
\"265bc17cc207fbb5ecfacf5ec1c9c449fde96dc2e317e8e47d53ad12f9734f52\": rpc error: code = NotFound desc = could not find container \"265bc17cc207fbb5ecfacf5ec1c9c449fde96dc2e317e8e47d53ad12f9734f52\": container with ID starting with 265bc17cc207fbb5ecfacf5ec1c9c449fde96dc2e317e8e47d53ad12f9734f52 not found: ID does not exist" Nov 21 20:44:28 crc kubenswrapper[4701]: I1121 20:44:28.561952 4701 scope.go:117] "RemoveContainer" containerID="d71902364a8d43207c02bb7576167ed5fc190d005e5b646f6048b75a1c60356d" Nov 21 20:44:28 crc kubenswrapper[4701]: E1121 20:44:28.562383 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d71902364a8d43207c02bb7576167ed5fc190d005e5b646f6048b75a1c60356d\": container with ID starting with d71902364a8d43207c02bb7576167ed5fc190d005e5b646f6048b75a1c60356d not found: ID does not exist" containerID="d71902364a8d43207c02bb7576167ed5fc190d005e5b646f6048b75a1c60356d" Nov 21 20:44:28 crc kubenswrapper[4701]: I1121 20:44:28.562425 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d71902364a8d43207c02bb7576167ed5fc190d005e5b646f6048b75a1c60356d"} err="failed to get container status \"d71902364a8d43207c02bb7576167ed5fc190d005e5b646f6048b75a1c60356d\": rpc error: code = NotFound desc = could not find container \"d71902364a8d43207c02bb7576167ed5fc190d005e5b646f6048b75a1c60356d\": container with ID starting with d71902364a8d43207c02bb7576167ed5fc190d005e5b646f6048b75a1c60356d not found: ID does not exist" Nov 21 20:44:28 crc kubenswrapper[4701]: I1121 20:44:28.562461 4701 scope.go:117] "RemoveContainer" containerID="ce35091fa724bfc48595f835a0f45dc188f875945cd87a9244aad7253f41ed16" Nov 21 20:44:28 crc kubenswrapper[4701]: E1121 20:44:28.563708 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ce35091fa724bfc48595f835a0f45dc188f875945cd87a9244aad7253f41ed16\": container with ID starting with ce35091fa724bfc48595f835a0f45dc188f875945cd87a9244aad7253f41ed16 not found: ID does not exist" containerID="ce35091fa724bfc48595f835a0f45dc188f875945cd87a9244aad7253f41ed16" Nov 21 20:44:28 crc kubenswrapper[4701]: I1121 20:44:28.563794 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce35091fa724bfc48595f835a0f45dc188f875945cd87a9244aad7253f41ed16"} err="failed to get container status \"ce35091fa724bfc48595f835a0f45dc188f875945cd87a9244aad7253f41ed16\": rpc error: code = NotFound desc = could not find container \"ce35091fa724bfc48595f835a0f45dc188f875945cd87a9244aad7253f41ed16\": container with ID starting with ce35091fa724bfc48595f835a0f45dc188f875945cd87a9244aad7253f41ed16 not found: ID does not exist" Nov 21 20:44:29 crc kubenswrapper[4701]: I1121 20:44:29.969470 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5108bcab-bfde-4295-8027-2db38c13eeab" path="/var/lib/kubelet/pods/5108bcab-bfde-4295-8027-2db38c13eeab/volumes" Nov 21 20:45:00 crc kubenswrapper[4701]: I1121 20:45:00.166515 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395965-q7mcm"] Nov 21 20:45:00 crc kubenswrapper[4701]: E1121 20:45:00.168113 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5108bcab-bfde-4295-8027-2db38c13eeab" containerName="registry-server" Nov 21 20:45:00 crc kubenswrapper[4701]: I1121 20:45:00.168135 4701 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="5108bcab-bfde-4295-8027-2db38c13eeab" containerName="registry-server" Nov 21 20:45:00 crc kubenswrapper[4701]: E1121 20:45:00.168160 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74b4c785-a4b8-4ac3-8906-0cb78c310784" containerName="copy" Nov 21 20:45:00 crc kubenswrapper[4701]: I1121 20:45:00.168168 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="74b4c785-a4b8-4ac3-8906-0cb78c310784" containerName="copy" Nov 21 20:45:00 crc kubenswrapper[4701]: E1121 20:45:00.168185 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74b4c785-a4b8-4ac3-8906-0cb78c310784" containerName="gather" Nov 21 20:45:00 crc kubenswrapper[4701]: I1121 20:45:00.168192 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="74b4c785-a4b8-4ac3-8906-0cb78c310784" containerName="gather" Nov 21 20:45:00 crc kubenswrapper[4701]: E1121 20:45:00.168226 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5108bcab-bfde-4295-8027-2db38c13eeab" containerName="extract-content" Nov 21 20:45:00 crc kubenswrapper[4701]: I1121 20:45:00.168232 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="5108bcab-bfde-4295-8027-2db38c13eeab" containerName="extract-content" Nov 21 20:45:00 crc kubenswrapper[4701]: E1121 20:45:00.168249 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5108bcab-bfde-4295-8027-2db38c13eeab" containerName="extract-utilities" Nov 21 20:45:00 crc kubenswrapper[4701]: I1121 20:45:00.168256 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="5108bcab-bfde-4295-8027-2db38c13eeab" containerName="extract-utilities" Nov 21 20:45:00 crc kubenswrapper[4701]: I1121 20:45:00.168483 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="74b4c785-a4b8-4ac3-8906-0cb78c310784" containerName="copy" Nov 21 20:45:00 crc kubenswrapper[4701]: I1121 20:45:00.168498 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="74b4c785-a4b8-4ac3-8906-0cb78c310784" containerName="gather" Nov 21 20:45:00 crc kubenswrapper[4701]: I1121 20:45:00.168511 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="5108bcab-bfde-4295-8027-2db38c13eeab" containerName="registry-server" Nov 21 20:45:00 crc kubenswrapper[4701]: I1121 20:45:00.169546 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395965-q7mcm" Nov 21 20:45:00 crc kubenswrapper[4701]: I1121 20:45:00.173673 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 21 20:45:00 crc kubenswrapper[4701]: I1121 20:45:00.173969 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 21 20:45:00 crc kubenswrapper[4701]: I1121 20:45:00.206670 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395965-q7mcm"] Nov 21 20:45:00 crc kubenswrapper[4701]: I1121 20:45:00.287851 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lhwk2\" (UniqueName: \"kubernetes.io/projected/55302d15-d2a5-406e-af54-1c8724c4e046-kube-api-access-lhwk2\") pod \"collect-profiles-29395965-q7mcm\" (UID: \"55302d15-d2a5-406e-af54-1c8724c4e046\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395965-q7mcm" Nov 21 20:45:00 crc kubenswrapper[4701]: I1121 20:45:00.287956 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/55302d15-d2a5-406e-af54-1c8724c4e046-secret-volume\") pod \"collect-profiles-29395965-q7mcm\" (UID: \"55302d15-d2a5-406e-af54-1c8724c4e046\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395965-q7mcm" Nov 21 20:45:00 crc kubenswrapper[4701]: I1121 20:45:00.288066 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/55302d15-d2a5-406e-af54-1c8724c4e046-config-volume\") pod \"collect-profiles-29395965-q7mcm\" (UID: \"55302d15-d2a5-406e-af54-1c8724c4e046\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395965-q7mcm" Nov 21 20:45:00 crc kubenswrapper[4701]: I1121 20:45:00.390596 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lhwk2\" (UniqueName: \"kubernetes.io/projected/55302d15-d2a5-406e-af54-1c8724c4e046-kube-api-access-lhwk2\") pod \"collect-profiles-29395965-q7mcm\" (UID: \"55302d15-d2a5-406e-af54-1c8724c4e046\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395965-q7mcm" Nov 21 20:45:00 crc kubenswrapper[4701]: I1121 20:45:00.390691 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/55302d15-d2a5-406e-af54-1c8724c4e046-secret-volume\") pod \"collect-profiles-29395965-q7mcm\" (UID: \"55302d15-d2a5-406e-af54-1c8724c4e046\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395965-q7mcm" Nov 21 20:45:00 crc kubenswrapper[4701]: I1121 20:45:00.390721 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/55302d15-d2a5-406e-af54-1c8724c4e046-config-volume\") pod \"collect-profiles-29395965-q7mcm\" (UID: \"55302d15-d2a5-406e-af54-1c8724c4e046\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395965-q7mcm" Nov 21 20:45:00 crc kubenswrapper[4701]: I1121 20:45:00.392107 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/55302d15-d2a5-406e-af54-1c8724c4e046-config-volume\") pod 
\"collect-profiles-29395965-q7mcm\" (UID: \"55302d15-d2a5-406e-af54-1c8724c4e046\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395965-q7mcm" Nov 21 20:45:00 crc kubenswrapper[4701]: I1121 20:45:00.403038 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/55302d15-d2a5-406e-af54-1c8724c4e046-secret-volume\") pod \"collect-profiles-29395965-q7mcm\" (UID: \"55302d15-d2a5-406e-af54-1c8724c4e046\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395965-q7mcm" Nov 21 20:45:00 crc kubenswrapper[4701]: I1121 20:45:00.413396 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lhwk2\" (UniqueName: \"kubernetes.io/projected/55302d15-d2a5-406e-af54-1c8724c4e046-kube-api-access-lhwk2\") pod \"collect-profiles-29395965-q7mcm\" (UID: \"55302d15-d2a5-406e-af54-1c8724c4e046\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395965-q7mcm" Nov 21 20:45:00 crc kubenswrapper[4701]: I1121 20:45:00.504158 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395965-q7mcm" Nov 21 20:45:00 crc kubenswrapper[4701]: I1121 20:45:00.855442 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395965-q7mcm"] Nov 21 20:45:00 crc kubenswrapper[4701]: I1121 20:45:00.884032 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395965-q7mcm" event={"ID":"55302d15-d2a5-406e-af54-1c8724c4e046","Type":"ContainerStarted","Data":"6fd86dbbb1f18073601e3f9b9ed542253c61c827919c07aa42a6cc82810cbdc8"} Nov 21 20:45:01 crc kubenswrapper[4701]: I1121 20:45:01.898021 4701 generic.go:334] "Generic (PLEG): container finished" podID="55302d15-d2a5-406e-af54-1c8724c4e046" containerID="ddc43f7960e0a647973caaf200bcc5cd583d68d454f6544ffb9241d5460244db" exitCode=0 Nov 21 20:45:01 crc kubenswrapper[4701]: I1121 20:45:01.898082 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395965-q7mcm" event={"ID":"55302d15-d2a5-406e-af54-1c8724c4e046","Type":"ContainerDied","Data":"ddc43f7960e0a647973caaf200bcc5cd583d68d454f6544ffb9241d5460244db"} Nov 21 20:45:03 crc kubenswrapper[4701]: I1121 20:45:03.388154 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395965-q7mcm" Nov 21 20:45:03 crc kubenswrapper[4701]: I1121 20:45:03.482917 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/55302d15-d2a5-406e-af54-1c8724c4e046-config-volume\") pod \"55302d15-d2a5-406e-af54-1c8724c4e046\" (UID: \"55302d15-d2a5-406e-af54-1c8724c4e046\") " Nov 21 20:45:03 crc kubenswrapper[4701]: I1121 20:45:03.483079 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/55302d15-d2a5-406e-af54-1c8724c4e046-secret-volume\") pod \"55302d15-d2a5-406e-af54-1c8724c4e046\" (UID: \"55302d15-d2a5-406e-af54-1c8724c4e046\") " Nov 21 20:45:03 crc kubenswrapper[4701]: I1121 20:45:03.483114 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lhwk2\" (UniqueName: \"kubernetes.io/projected/55302d15-d2a5-406e-af54-1c8724c4e046-kube-api-access-lhwk2\") pod \"55302d15-d2a5-406e-af54-1c8724c4e046\" (UID: \"55302d15-d2a5-406e-af54-1c8724c4e046\") " Nov 21 20:45:03 crc kubenswrapper[4701]: I1121 20:45:03.483999 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/55302d15-d2a5-406e-af54-1c8724c4e046-config-volume" (OuterVolumeSpecName: "config-volume") pod "55302d15-d2a5-406e-af54-1c8724c4e046" (UID: "55302d15-d2a5-406e-af54-1c8724c4e046"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 20:45:03 crc kubenswrapper[4701]: I1121 20:45:03.491098 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55302d15-d2a5-406e-af54-1c8724c4e046-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "55302d15-d2a5-406e-af54-1c8724c4e046" (UID: "55302d15-d2a5-406e-af54-1c8724c4e046"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 20:45:03 crc kubenswrapper[4701]: I1121 20:45:03.491959 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55302d15-d2a5-406e-af54-1c8724c4e046-kube-api-access-lhwk2" (OuterVolumeSpecName: "kube-api-access-lhwk2") pod "55302d15-d2a5-406e-af54-1c8724c4e046" (UID: "55302d15-d2a5-406e-af54-1c8724c4e046"). InnerVolumeSpecName "kube-api-access-lhwk2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 20:45:03 crc kubenswrapper[4701]: I1121 20:45:03.586421 4701 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/55302d15-d2a5-406e-af54-1c8724c4e046-config-volume\") on node \"crc\" DevicePath \"\"" Nov 21 20:45:03 crc kubenswrapper[4701]: I1121 20:45:03.586467 4701 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/55302d15-d2a5-406e-af54-1c8724c4e046-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 21 20:45:03 crc kubenswrapper[4701]: I1121 20:45:03.586481 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lhwk2\" (UniqueName: \"kubernetes.io/projected/55302d15-d2a5-406e-af54-1c8724c4e046-kube-api-access-lhwk2\") on node \"crc\" DevicePath \"\"" Nov 21 20:45:03 crc kubenswrapper[4701]: I1121 20:45:03.934916 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395965-q7mcm" event={"ID":"55302d15-d2a5-406e-af54-1c8724c4e046","Type":"ContainerDied","Data":"6fd86dbbb1f18073601e3f9b9ed542253c61c827919c07aa42a6cc82810cbdc8"} Nov 21 20:45:03 crc kubenswrapper[4701]: I1121 20:45:03.935287 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6fd86dbbb1f18073601e3f9b9ed542253c61c827919c07aa42a6cc82810cbdc8" Nov 21 20:45:03 crc kubenswrapper[4701]: I1121 20:45:03.935384 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395965-q7mcm" Nov 21 20:45:04 crc kubenswrapper[4701]: I1121 20:45:04.515061 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395920-jhddz"] Nov 21 20:45:04 crc kubenswrapper[4701]: I1121 20:45:04.534486 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395920-jhddz"] Nov 21 20:45:05 crc kubenswrapper[4701]: I1121 20:45:05.978446 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9f3a1f12-3ef6-41f0-9290-273103bcac4f" path="/var/lib/kubelet/pods/9f3a1f12-3ef6-41f0-9290-273103bcac4f/volumes" Nov 21 20:45:13 crc kubenswrapper[4701]: I1121 20:45:13.317925 4701 scope.go:117] "RemoveContainer" containerID="0c0c7d5a057b6bb2a6b7979c93c402ebd676f99ce76988ad488eab4ace449bb2" Nov 21 20:45:13 crc kubenswrapper[4701]: I1121 20:45:13.357116 4701 scope.go:117] "RemoveContainer" containerID="64dd3ffa7f62b8fb727766ab3216e32663922ba79df50f7170cf75c8222b0604" Nov 21 20:45:48 crc kubenswrapper[4701]: I1121 20:45:48.613869 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 20:45:48 crc kubenswrapper[4701]: I1121 20:45:48.614845 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 20:46:18 crc kubenswrapper[4701]: I1121 20:46:18.613661 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 20:46:18 crc kubenswrapper[4701]: I1121 20:46:18.614495 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 20:46:48 crc kubenswrapper[4701]: I1121 20:46:48.613859 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 20:46:48 crc kubenswrapper[4701]: I1121 20:46:48.614537 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 20:46:48 crc kubenswrapper[4701]: I1121 20:46:48.614600 4701 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" Nov 21 20:46:48 crc kubenswrapper[4701]: I1121 20:46:48.615499 4701 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"61ed04b164680d4d9f55d5d2bc2dda8c52790bbd62e7733688bf5c8ae3f7d69f"} pod="openshift-machine-config-operator/machine-config-daemon-tbszf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 20:46:48 crc kubenswrapper[4701]: I1121 20:46:48.615744 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" containerID="cri-o://61ed04b164680d4d9f55d5d2bc2dda8c52790bbd62e7733688bf5c8ae3f7d69f" gracePeriod=600 Nov 21 20:46:49 crc kubenswrapper[4701]: I1121 20:46:49.416019 4701 generic.go:334] "Generic (PLEG): container finished" podID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerID="61ed04b164680d4d9f55d5d2bc2dda8c52790bbd62e7733688bf5c8ae3f7d69f" exitCode=0 Nov 21 20:46:49 crc kubenswrapper[4701]: I1121 20:46:49.416041 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerDied","Data":"61ed04b164680d4d9f55d5d2bc2dda8c52790bbd62e7733688bf5c8ae3f7d69f"} Nov 21 20:46:49 crc kubenswrapper[4701]: I1121 20:46:49.417243 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerStarted","Data":"3d797f0000302550ad2e1909fd94b82c08f3845841d76d73d89e762960062c62"} Nov 21 20:46:49 crc kubenswrapper[4701]: I1121 20:46:49.417303 4701 scope.go:117] "RemoveContainer" containerID="a8ef73b4a01861450b05d5f0ee923d59212be6c48e5ab1447c10cf0a3199c233" Nov 21 20:47:26 crc kubenswrapper[4701]: I1121 20:47:26.883079 4701 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-scggw/must-gather-lv2mj"] Nov 21 20:47:26 crc kubenswrapper[4701]: E1121 20:47:26.884554 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55302d15-d2a5-406e-af54-1c8724c4e046" containerName="collect-profiles" Nov 21 20:47:26 crc kubenswrapper[4701]: I1121 20:47:26.884574 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="55302d15-d2a5-406e-af54-1c8724c4e046" containerName="collect-profiles" Nov 21 20:47:26 crc kubenswrapper[4701]: I1121 20:47:26.884781 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="55302d15-d2a5-406e-af54-1c8724c4e046" containerName="collect-profiles" Nov 21 20:47:26 crc kubenswrapper[4701]: I1121 20:47:26.886052 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-scggw/must-gather-lv2mj" Nov 21 20:47:26 crc kubenswrapper[4701]: I1121 20:47:26.898119 4701 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-scggw"/"default-dockercfg-lmsh6" Nov 21 20:47:26 crc kubenswrapper[4701]: I1121 20:47:26.898260 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-scggw"/"kube-root-ca.crt" Nov 21 20:47:26 crc kubenswrapper[4701]: I1121 20:47:26.898278 4701 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-scggw"/"openshift-service-ca.crt" Nov 21 20:47:26 crc kubenswrapper[4701]: I1121 20:47:26.915017 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-scggw/must-gather-lv2mj"] Nov 21 20:47:26 crc kubenswrapper[4701]: I1121 20:47:26.916124 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vszxn\" (UniqueName: \"kubernetes.io/projected/85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a-kube-api-access-vszxn\") pod \"must-gather-lv2mj\" (UID: \"85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a\") " pod="openshift-must-gather-scggw/must-gather-lv2mj" Nov 21 20:47:26 crc kubenswrapper[4701]: I1121 20:47:26.916266 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a-must-gather-output\") pod \"must-gather-lv2mj\" (UID: \"85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a\") " pod="openshift-must-gather-scggw/must-gather-lv2mj" Nov 21 20:47:27 crc kubenswrapper[4701]: I1121 20:47:27.019597 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vszxn\" (UniqueName: \"kubernetes.io/projected/85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a-kube-api-access-vszxn\") pod \"must-gather-lv2mj\" (UID: \"85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a\") " pod="openshift-must-gather-scggw/must-gather-lv2mj" Nov 21 20:47:27 crc kubenswrapper[4701]: I1121 20:47:27.020571 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a-must-gather-output\") pod \"must-gather-lv2mj\" (UID: \"85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a\") " pod="openshift-must-gather-scggw/must-gather-lv2mj" Nov 21 20:47:27 crc kubenswrapper[4701]: I1121 20:47:27.021055 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a-must-gather-output\") pod \"must-gather-lv2mj\" (UID: 
\"85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a\") " pod="openshift-must-gather-scggw/must-gather-lv2mj" Nov 21 20:47:27 crc kubenswrapper[4701]: I1121 20:47:27.053800 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vszxn\" (UniqueName: \"kubernetes.io/projected/85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a-kube-api-access-vszxn\") pod \"must-gather-lv2mj\" (UID: \"85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a\") " pod="openshift-must-gather-scggw/must-gather-lv2mj" Nov 21 20:47:27 crc kubenswrapper[4701]: I1121 20:47:27.212292 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-scggw/must-gather-lv2mj" Nov 21 20:47:27 crc kubenswrapper[4701]: I1121 20:47:27.675778 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-scggw/must-gather-lv2mj"] Nov 21 20:47:27 crc kubenswrapper[4701]: W1121 20:47:27.698666 4701 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod85c412eb_d5a7_4a82_9f26_4dc70fbbaf8a.slice/crio-a2de242926608fc1a02e17f022cd4eb98ef4d813c758d6334808b63de8029940 WatchSource:0}: Error finding container a2de242926608fc1a02e17f022cd4eb98ef4d813c758d6334808b63de8029940: Status 404 returned error can't find the container with id a2de242926608fc1a02e17f022cd4eb98ef4d813c758d6334808b63de8029940 Nov 21 20:47:27 crc kubenswrapper[4701]: I1121 20:47:27.998340 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-scggw/must-gather-lv2mj" event={"ID":"85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a","Type":"ContainerStarted","Data":"a2de242926608fc1a02e17f022cd4eb98ef4d813c758d6334808b63de8029940"} Nov 21 20:47:29 crc kubenswrapper[4701]: I1121 20:47:29.026565 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-scggw/must-gather-lv2mj" event={"ID":"85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a","Type":"ContainerStarted","Data":"9e65fbc771a52d5058e4a191042f9d704229cea96833151cb2eba56970f79e33"} Nov 21 20:47:29 crc kubenswrapper[4701]: I1121 20:47:29.026909 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-scggw/must-gather-lv2mj" event={"ID":"85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a","Type":"ContainerStarted","Data":"96e91fa25045e54efcd740e569948d8e8c37e0358d6aaaa127d126c5f7b0f290"} Nov 21 20:47:29 crc kubenswrapper[4701]: I1121 20:47:29.072580 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-scggw/must-gather-lv2mj" podStartSLOduration=3.07232236 podStartE2EDuration="3.07232236s" podCreationTimestamp="2025-11-21 20:47:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 20:47:29.050671373 +0000 UTC m=+6339.835811400" watchObservedRunningTime="2025-11-21 20:47:29.07232236 +0000 UTC m=+6339.857462407" Nov 21 20:47:32 crc kubenswrapper[4701]: I1121 20:47:32.438983 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-scggw/crc-debug-bj7zf"] Nov 21 20:47:32 crc kubenswrapper[4701]: I1121 20:47:32.442661 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-scggw/crc-debug-bj7zf" Nov 21 20:47:32 crc kubenswrapper[4701]: I1121 20:47:32.555263 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8nlh9\" (UniqueName: \"kubernetes.io/projected/c19f6379-56c8-481a-8fba-0e410bb63e1f-kube-api-access-8nlh9\") pod \"crc-debug-bj7zf\" (UID: \"c19f6379-56c8-481a-8fba-0e410bb63e1f\") " pod="openshift-must-gather-scggw/crc-debug-bj7zf" Nov 21 20:47:32 crc kubenswrapper[4701]: I1121 20:47:32.555334 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c19f6379-56c8-481a-8fba-0e410bb63e1f-host\") pod \"crc-debug-bj7zf\" (UID: \"c19f6379-56c8-481a-8fba-0e410bb63e1f\") " pod="openshift-must-gather-scggw/crc-debug-bj7zf" Nov 21 20:47:32 crc kubenswrapper[4701]: I1121 20:47:32.657954 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8nlh9\" (UniqueName: \"kubernetes.io/projected/c19f6379-56c8-481a-8fba-0e410bb63e1f-kube-api-access-8nlh9\") pod \"crc-debug-bj7zf\" (UID: \"c19f6379-56c8-481a-8fba-0e410bb63e1f\") " pod="openshift-must-gather-scggw/crc-debug-bj7zf" Nov 21 20:47:32 crc kubenswrapper[4701]: I1121 20:47:32.658032 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c19f6379-56c8-481a-8fba-0e410bb63e1f-host\") pod \"crc-debug-bj7zf\" (UID: \"c19f6379-56c8-481a-8fba-0e410bb63e1f\") " pod="openshift-must-gather-scggw/crc-debug-bj7zf" Nov 21 20:47:32 crc kubenswrapper[4701]: I1121 20:47:32.658176 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c19f6379-56c8-481a-8fba-0e410bb63e1f-host\") pod \"crc-debug-bj7zf\" (UID: \"c19f6379-56c8-481a-8fba-0e410bb63e1f\") " pod="openshift-must-gather-scggw/crc-debug-bj7zf" Nov 21 20:47:32 crc kubenswrapper[4701]: I1121 20:47:32.690338 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8nlh9\" (UniqueName: \"kubernetes.io/projected/c19f6379-56c8-481a-8fba-0e410bb63e1f-kube-api-access-8nlh9\") pod \"crc-debug-bj7zf\" (UID: \"c19f6379-56c8-481a-8fba-0e410bb63e1f\") " pod="openshift-must-gather-scggw/crc-debug-bj7zf" Nov 21 20:47:32 crc kubenswrapper[4701]: I1121 20:47:32.770574 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-scggw/crc-debug-bj7zf" Nov 21 20:47:33 crc kubenswrapper[4701]: I1121 20:47:33.082075 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-scggw/crc-debug-bj7zf" event={"ID":"c19f6379-56c8-481a-8fba-0e410bb63e1f","Type":"ContainerStarted","Data":"68f2fc4744a0d2b29bafb8cd3d33f9102a8daf2d011f9d5e1077fae5b9e14101"} Nov 21 20:47:34 crc kubenswrapper[4701]: I1121 20:47:34.104366 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-scggw/crc-debug-bj7zf" event={"ID":"c19f6379-56c8-481a-8fba-0e410bb63e1f","Type":"ContainerStarted","Data":"c956ce3f74116ebc827edd41bc87e3e3152deba5158330609e339d07462ed77b"} Nov 21 20:47:34 crc kubenswrapper[4701]: I1121 20:47:34.128926 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-scggw/crc-debug-bj7zf" podStartSLOduration=2.128905034 podStartE2EDuration="2.128905034s" podCreationTimestamp="2025-11-21 20:47:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 20:47:34.120924251 +0000 UTC m=+6344.906064278" watchObservedRunningTime="2025-11-21 20:47:34.128905034 +0000 UTC m=+6344.914045051" Nov 21 20:48:14 crc kubenswrapper[4701]: I1121 20:48:14.518381 4701 generic.go:334] "Generic (PLEG): container finished" podID="c19f6379-56c8-481a-8fba-0e410bb63e1f" containerID="c956ce3f74116ebc827edd41bc87e3e3152deba5158330609e339d07462ed77b" exitCode=0 Nov 21 20:48:14 crc kubenswrapper[4701]: I1121 20:48:14.518466 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-scggw/crc-debug-bj7zf" event={"ID":"c19f6379-56c8-481a-8fba-0e410bb63e1f","Type":"ContainerDied","Data":"c956ce3f74116ebc827edd41bc87e3e3152deba5158330609e339d07462ed77b"} Nov 21 20:48:15 crc kubenswrapper[4701]: I1121 20:48:15.668684 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-scggw/crc-debug-bj7zf" Nov 21 20:48:15 crc kubenswrapper[4701]: I1121 20:48:15.693432 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8nlh9\" (UniqueName: \"kubernetes.io/projected/c19f6379-56c8-481a-8fba-0e410bb63e1f-kube-api-access-8nlh9\") pod \"c19f6379-56c8-481a-8fba-0e410bb63e1f\" (UID: \"c19f6379-56c8-481a-8fba-0e410bb63e1f\") " Nov 21 20:48:15 crc kubenswrapper[4701]: I1121 20:48:15.693586 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c19f6379-56c8-481a-8fba-0e410bb63e1f-host\") pod \"c19f6379-56c8-481a-8fba-0e410bb63e1f\" (UID: \"c19f6379-56c8-481a-8fba-0e410bb63e1f\") " Nov 21 20:48:15 crc kubenswrapper[4701]: I1121 20:48:15.693651 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c19f6379-56c8-481a-8fba-0e410bb63e1f-host" (OuterVolumeSpecName: "host") pod "c19f6379-56c8-481a-8fba-0e410bb63e1f" (UID: "c19f6379-56c8-481a-8fba-0e410bb63e1f"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 20:48:15 crc kubenswrapper[4701]: I1121 20:48:15.694043 4701 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c19f6379-56c8-481a-8fba-0e410bb63e1f-host\") on node \"crc\" DevicePath \"\"" Nov 21 20:48:15 crc kubenswrapper[4701]: I1121 20:48:15.709842 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-scggw/crc-debug-bj7zf"] Nov 21 20:48:15 crc kubenswrapper[4701]: I1121 20:48:15.711673 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c19f6379-56c8-481a-8fba-0e410bb63e1f-kube-api-access-8nlh9" (OuterVolumeSpecName: "kube-api-access-8nlh9") pod "c19f6379-56c8-481a-8fba-0e410bb63e1f" (UID: "c19f6379-56c8-481a-8fba-0e410bb63e1f"). InnerVolumeSpecName "kube-api-access-8nlh9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 20:48:15 crc kubenswrapper[4701]: I1121 20:48:15.717414 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-scggw/crc-debug-bj7zf"] Nov 21 20:48:15 crc kubenswrapper[4701]: I1121 20:48:15.796780 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8nlh9\" (UniqueName: \"kubernetes.io/projected/c19f6379-56c8-481a-8fba-0e410bb63e1f-kube-api-access-8nlh9\") on node \"crc\" DevicePath \"\"" Nov 21 20:48:15 crc kubenswrapper[4701]: I1121 20:48:15.963912 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c19f6379-56c8-481a-8fba-0e410bb63e1f" path="/var/lib/kubelet/pods/c19f6379-56c8-481a-8fba-0e410bb63e1f/volumes" Nov 21 20:48:16 crc kubenswrapper[4701]: I1121 20:48:16.538871 4701 scope.go:117] "RemoveContainer" containerID="c956ce3f74116ebc827edd41bc87e3e3152deba5158330609e339d07462ed77b" Nov 21 20:48:16 crc kubenswrapper[4701]: I1121 20:48:16.539038 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-scggw/crc-debug-bj7zf" Nov 21 20:48:16 crc kubenswrapper[4701]: I1121 20:48:16.902120 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-scggw/crc-debug-d5ftn"] Nov 21 20:48:16 crc kubenswrapper[4701]: E1121 20:48:16.902713 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c19f6379-56c8-481a-8fba-0e410bb63e1f" containerName="container-00" Nov 21 20:48:16 crc kubenswrapper[4701]: I1121 20:48:16.902731 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="c19f6379-56c8-481a-8fba-0e410bb63e1f" containerName="container-00" Nov 21 20:48:16 crc kubenswrapper[4701]: I1121 20:48:16.902971 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="c19f6379-56c8-481a-8fba-0e410bb63e1f" containerName="container-00" Nov 21 20:48:16 crc kubenswrapper[4701]: I1121 20:48:16.903833 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-scggw/crc-debug-d5ftn" Nov 21 20:48:17 crc kubenswrapper[4701]: I1121 20:48:17.023100 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f2213ec6-ff74-4b75-93ef-3d5d86fe23cc-host\") pod \"crc-debug-d5ftn\" (UID: \"f2213ec6-ff74-4b75-93ef-3d5d86fe23cc\") " pod="openshift-must-gather-scggw/crc-debug-d5ftn" Nov 21 20:48:17 crc kubenswrapper[4701]: I1121 20:48:17.023155 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-74tfn\" (UniqueName: \"kubernetes.io/projected/f2213ec6-ff74-4b75-93ef-3d5d86fe23cc-kube-api-access-74tfn\") pod \"crc-debug-d5ftn\" (UID: \"f2213ec6-ff74-4b75-93ef-3d5d86fe23cc\") " pod="openshift-must-gather-scggw/crc-debug-d5ftn" Nov 21 20:48:17 crc kubenswrapper[4701]: I1121 20:48:17.125269 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f2213ec6-ff74-4b75-93ef-3d5d86fe23cc-host\") pod \"crc-debug-d5ftn\" (UID: \"f2213ec6-ff74-4b75-93ef-3d5d86fe23cc\") " pod="openshift-must-gather-scggw/crc-debug-d5ftn" Nov 21 20:48:17 crc kubenswrapper[4701]: I1121 20:48:17.125649 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-74tfn\" (UniqueName: \"kubernetes.io/projected/f2213ec6-ff74-4b75-93ef-3d5d86fe23cc-kube-api-access-74tfn\") pod \"crc-debug-d5ftn\" (UID: \"f2213ec6-ff74-4b75-93ef-3d5d86fe23cc\") " pod="openshift-must-gather-scggw/crc-debug-d5ftn" Nov 21 20:48:17 crc kubenswrapper[4701]: I1121 20:48:17.125442 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f2213ec6-ff74-4b75-93ef-3d5d86fe23cc-host\") pod \"crc-debug-d5ftn\" (UID: \"f2213ec6-ff74-4b75-93ef-3d5d86fe23cc\") " pod="openshift-must-gather-scggw/crc-debug-d5ftn" Nov 21 20:48:17 crc kubenswrapper[4701]: I1121 20:48:17.146000 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-74tfn\" (UniqueName: \"kubernetes.io/projected/f2213ec6-ff74-4b75-93ef-3d5d86fe23cc-kube-api-access-74tfn\") pod \"crc-debug-d5ftn\" (UID: \"f2213ec6-ff74-4b75-93ef-3d5d86fe23cc\") " pod="openshift-must-gather-scggw/crc-debug-d5ftn" Nov 21 20:48:17 crc kubenswrapper[4701]: I1121 20:48:17.218786 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-scggw/crc-debug-d5ftn" Nov 21 20:48:17 crc kubenswrapper[4701]: I1121 20:48:17.549598 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-scggw/crc-debug-d5ftn" event={"ID":"f2213ec6-ff74-4b75-93ef-3d5d86fe23cc","Type":"ContainerStarted","Data":"aa8beaf0e4ceba258061cc7d4c808dbf6d1d35ac27f0b125664192521892b781"} Nov 21 20:48:17 crc kubenswrapper[4701]: I1121 20:48:17.550098 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-scggw/crc-debug-d5ftn" event={"ID":"f2213ec6-ff74-4b75-93ef-3d5d86fe23cc","Type":"ContainerStarted","Data":"46cf37ea0c82a5425f56381ada289612e113f739bd68edf835dddfc4269a4b9c"} Nov 21 20:48:17 crc kubenswrapper[4701]: I1121 20:48:17.566000 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-scggw/crc-debug-d5ftn" podStartSLOduration=1.565981936 podStartE2EDuration="1.565981936s" podCreationTimestamp="2025-11-21 20:48:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 20:48:17.56201378 +0000 UTC m=+6388.347153817" watchObservedRunningTime="2025-11-21 20:48:17.565981936 +0000 UTC m=+6388.351121963" Nov 21 20:48:18 crc kubenswrapper[4701]: I1121 20:48:18.567473 4701 generic.go:334] "Generic (PLEG): container finished" podID="f2213ec6-ff74-4b75-93ef-3d5d86fe23cc" containerID="aa8beaf0e4ceba258061cc7d4c808dbf6d1d35ac27f0b125664192521892b781" exitCode=0 Nov 21 20:48:18 crc kubenswrapper[4701]: I1121 20:48:18.567647 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-scggw/crc-debug-d5ftn" event={"ID":"f2213ec6-ff74-4b75-93ef-3d5d86fe23cc","Type":"ContainerDied","Data":"aa8beaf0e4ceba258061cc7d4c808dbf6d1d35ac27f0b125664192521892b781"} Nov 21 20:48:19 crc kubenswrapper[4701]: I1121 20:48:19.680855 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-scggw/crc-debug-d5ftn" Nov 21 20:48:19 crc kubenswrapper[4701]: I1121 20:48:19.733711 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-scggw/crc-debug-d5ftn"] Nov 21 20:48:19 crc kubenswrapper[4701]: I1121 20:48:19.743169 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-scggw/crc-debug-d5ftn"] Nov 21 20:48:19 crc kubenswrapper[4701]: I1121 20:48:19.785567 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-74tfn\" (UniqueName: \"kubernetes.io/projected/f2213ec6-ff74-4b75-93ef-3d5d86fe23cc-kube-api-access-74tfn\") pod \"f2213ec6-ff74-4b75-93ef-3d5d86fe23cc\" (UID: \"f2213ec6-ff74-4b75-93ef-3d5d86fe23cc\") " Nov 21 20:48:19 crc kubenswrapper[4701]: I1121 20:48:19.785607 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f2213ec6-ff74-4b75-93ef-3d5d86fe23cc-host\") pod \"f2213ec6-ff74-4b75-93ef-3d5d86fe23cc\" (UID: \"f2213ec6-ff74-4b75-93ef-3d5d86fe23cc\") " Nov 21 20:48:19 crc kubenswrapper[4701]: I1121 20:48:19.786001 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f2213ec6-ff74-4b75-93ef-3d5d86fe23cc-host" (OuterVolumeSpecName: "host") pod "f2213ec6-ff74-4b75-93ef-3d5d86fe23cc" (UID: "f2213ec6-ff74-4b75-93ef-3d5d86fe23cc"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 20:48:19 crc kubenswrapper[4701]: I1121 20:48:19.786235 4701 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f2213ec6-ff74-4b75-93ef-3d5d86fe23cc-host\") on node \"crc\" DevicePath \"\"" Nov 21 20:48:19 crc kubenswrapper[4701]: I1121 20:48:19.790878 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f2213ec6-ff74-4b75-93ef-3d5d86fe23cc-kube-api-access-74tfn" (OuterVolumeSpecName: "kube-api-access-74tfn") pod "f2213ec6-ff74-4b75-93ef-3d5d86fe23cc" (UID: "f2213ec6-ff74-4b75-93ef-3d5d86fe23cc"). InnerVolumeSpecName "kube-api-access-74tfn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 20:48:19 crc kubenswrapper[4701]: I1121 20:48:19.888340 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-74tfn\" (UniqueName: \"kubernetes.io/projected/f2213ec6-ff74-4b75-93ef-3d5d86fe23cc-kube-api-access-74tfn\") on node \"crc\" DevicePath \"\"" Nov 21 20:48:19 crc kubenswrapper[4701]: I1121 20:48:19.963492 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f2213ec6-ff74-4b75-93ef-3d5d86fe23cc" path="/var/lib/kubelet/pods/f2213ec6-ff74-4b75-93ef-3d5d86fe23cc/volumes" Nov 21 20:48:20 crc kubenswrapper[4701]: I1121 20:48:20.592409 4701 scope.go:117] "RemoveContainer" containerID="aa8beaf0e4ceba258061cc7d4c808dbf6d1d35ac27f0b125664192521892b781" Nov 21 20:48:20 crc kubenswrapper[4701]: I1121 20:48:20.592974 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-scggw/crc-debug-d5ftn" Nov 21 20:48:20 crc kubenswrapper[4701]: I1121 20:48:20.933752 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-scggw/crc-debug-xxq8j"] Nov 21 20:48:20 crc kubenswrapper[4701]: E1121 20:48:20.935999 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2213ec6-ff74-4b75-93ef-3d5d86fe23cc" containerName="container-00" Nov 21 20:48:20 crc kubenswrapper[4701]: I1121 20:48:20.936065 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2213ec6-ff74-4b75-93ef-3d5d86fe23cc" containerName="container-00" Nov 21 20:48:20 crc kubenswrapper[4701]: I1121 20:48:20.937364 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2213ec6-ff74-4b75-93ef-3d5d86fe23cc" containerName="container-00" Nov 21 20:48:20 crc kubenswrapper[4701]: I1121 20:48:20.939069 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-scggw/crc-debug-xxq8j" Nov 21 20:48:21 crc kubenswrapper[4701]: I1121 20:48:21.012914 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b8qgd\" (UniqueName: \"kubernetes.io/projected/a02b4582-b625-4515-af6b-bef20d78d5db-kube-api-access-b8qgd\") pod \"crc-debug-xxq8j\" (UID: \"a02b4582-b625-4515-af6b-bef20d78d5db\") " pod="openshift-must-gather-scggw/crc-debug-xxq8j" Nov 21 20:48:21 crc kubenswrapper[4701]: I1121 20:48:21.013372 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a02b4582-b625-4515-af6b-bef20d78d5db-host\") pod \"crc-debug-xxq8j\" (UID: \"a02b4582-b625-4515-af6b-bef20d78d5db\") " pod="openshift-must-gather-scggw/crc-debug-xxq8j" Nov 21 20:48:21 crc kubenswrapper[4701]: I1121 20:48:21.116446 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a02b4582-b625-4515-af6b-bef20d78d5db-host\") pod \"crc-debug-xxq8j\" (UID: \"a02b4582-b625-4515-af6b-bef20d78d5db\") " pod="openshift-must-gather-scggw/crc-debug-xxq8j" Nov 21 20:48:21 crc kubenswrapper[4701]: I1121 20:48:21.116575 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b8qgd\" (UniqueName: \"kubernetes.io/projected/a02b4582-b625-4515-af6b-bef20d78d5db-kube-api-access-b8qgd\") pod \"crc-debug-xxq8j\" (UID: \"a02b4582-b625-4515-af6b-bef20d78d5db\") " pod="openshift-must-gather-scggw/crc-debug-xxq8j" Nov 21 20:48:21 crc kubenswrapper[4701]: I1121 20:48:21.116651 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a02b4582-b625-4515-af6b-bef20d78d5db-host\") pod \"crc-debug-xxq8j\" (UID: \"a02b4582-b625-4515-af6b-bef20d78d5db\") " pod="openshift-must-gather-scggw/crc-debug-xxq8j" Nov 21 20:48:21 crc kubenswrapper[4701]: I1121 20:48:21.145968 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b8qgd\" (UniqueName: \"kubernetes.io/projected/a02b4582-b625-4515-af6b-bef20d78d5db-kube-api-access-b8qgd\") pod \"crc-debug-xxq8j\" (UID: \"a02b4582-b625-4515-af6b-bef20d78d5db\") " pod="openshift-must-gather-scggw/crc-debug-xxq8j" Nov 21 20:48:21 crc kubenswrapper[4701]: I1121 20:48:21.272214 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-scggw/crc-debug-xxq8j" Nov 21 20:48:21 crc kubenswrapper[4701]: I1121 20:48:21.606293 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-scggw/crc-debug-xxq8j" event={"ID":"a02b4582-b625-4515-af6b-bef20d78d5db","Type":"ContainerStarted","Data":"9a5839493c5d1d047dc17383d01724c33bfaafd6cc4ddce213388a5a46fa0efe"} Nov 21 20:48:22 crc kubenswrapper[4701]: I1121 20:48:22.620262 4701 generic.go:334] "Generic (PLEG): container finished" podID="a02b4582-b625-4515-af6b-bef20d78d5db" containerID="5fae35be2da2225c96c778d7345afaa1f9fdd693757f865b4c5b2b8450ed4e6c" exitCode=0 Nov 21 20:48:22 crc kubenswrapper[4701]: I1121 20:48:22.620359 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-scggw/crc-debug-xxq8j" event={"ID":"a02b4582-b625-4515-af6b-bef20d78d5db","Type":"ContainerDied","Data":"5fae35be2da2225c96c778d7345afaa1f9fdd693757f865b4c5b2b8450ed4e6c"} Nov 21 20:48:22 crc kubenswrapper[4701]: I1121 20:48:22.668504 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-scggw/crc-debug-xxq8j"] Nov 21 20:48:22 crc kubenswrapper[4701]: I1121 20:48:22.678347 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-scggw/crc-debug-xxq8j"] Nov 21 20:48:23 crc kubenswrapper[4701]: I1121 20:48:23.751478 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-scggw/crc-debug-xxq8j" Nov 21 20:48:23 crc kubenswrapper[4701]: I1121 20:48:23.887546 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a02b4582-b625-4515-af6b-bef20d78d5db-host\") pod \"a02b4582-b625-4515-af6b-bef20d78d5db\" (UID: \"a02b4582-b625-4515-af6b-bef20d78d5db\") " Nov 21 20:48:23 crc kubenswrapper[4701]: I1121 20:48:23.887889 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b8qgd\" (UniqueName: \"kubernetes.io/projected/a02b4582-b625-4515-af6b-bef20d78d5db-kube-api-access-b8qgd\") pod \"a02b4582-b625-4515-af6b-bef20d78d5db\" (UID: \"a02b4582-b625-4515-af6b-bef20d78d5db\") " Nov 21 20:48:23 crc kubenswrapper[4701]: I1121 20:48:23.888192 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a02b4582-b625-4515-af6b-bef20d78d5db-host" (OuterVolumeSpecName: "host") pod "a02b4582-b625-4515-af6b-bef20d78d5db" (UID: "a02b4582-b625-4515-af6b-bef20d78d5db"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 20:48:23 crc kubenswrapper[4701]: I1121 20:48:23.888699 4701 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a02b4582-b625-4515-af6b-bef20d78d5db-host\") on node \"crc\" DevicePath \"\"" Nov 21 20:48:23 crc kubenswrapper[4701]: I1121 20:48:23.904401 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a02b4582-b625-4515-af6b-bef20d78d5db-kube-api-access-b8qgd" (OuterVolumeSpecName: "kube-api-access-b8qgd") pod "a02b4582-b625-4515-af6b-bef20d78d5db" (UID: "a02b4582-b625-4515-af6b-bef20d78d5db"). InnerVolumeSpecName "kube-api-access-b8qgd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 20:48:23 crc kubenswrapper[4701]: I1121 20:48:23.963991 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a02b4582-b625-4515-af6b-bef20d78d5db" path="/var/lib/kubelet/pods/a02b4582-b625-4515-af6b-bef20d78d5db/volumes" Nov 21 20:48:23 crc kubenswrapper[4701]: I1121 20:48:23.990839 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b8qgd\" (UniqueName: \"kubernetes.io/projected/a02b4582-b625-4515-af6b-bef20d78d5db-kube-api-access-b8qgd\") on node \"crc\" DevicePath \"\"" Nov 21 20:48:24 crc kubenswrapper[4701]: I1121 20:48:24.646816 4701 scope.go:117] "RemoveContainer" containerID="5fae35be2da2225c96c778d7345afaa1f9fdd693757f865b4c5b2b8450ed4e6c" Nov 21 20:48:24 crc kubenswrapper[4701]: I1121 20:48:24.646892 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-scggw/crc-debug-xxq8j" Nov 21 20:49:05 crc kubenswrapper[4701]: I1121 20:49:05.755267 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-796cb85bf8-h88pn_59b306d9-cacf-4e38-b19f-60f8ebe026a7/barbican-api/0.log" Nov 21 20:49:05 crc kubenswrapper[4701]: I1121 20:49:05.937425 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-796cb85bf8-h88pn_59b306d9-cacf-4e38-b19f-60f8ebe026a7/barbican-api-log/0.log" Nov 21 20:49:06 crc kubenswrapper[4701]: I1121 20:49:06.010819 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-58799d9dcd-lkd7s_c8d0e0c3-70bf-4ce0-94ea-54f03b3c42e3/barbican-keystone-listener/0.log" Nov 21 20:49:06 crc kubenswrapper[4701]: I1121 20:49:06.113369 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-58799d9dcd-lkd7s_c8d0e0c3-70bf-4ce0-94ea-54f03b3c42e3/barbican-keystone-listener-log/0.log" Nov 21 20:49:06 crc kubenswrapper[4701]: I1121 20:49:06.215324 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-6ff5fffc67-6nrzn_a20c7ac2-0856-4c64-8910-3c053184c47b/barbican-worker/0.log" Nov 21 20:49:06 crc kubenswrapper[4701]: I1121 20:49:06.305509 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-6ff5fffc67-6nrzn_a20c7ac2-0856-4c64-8910-3c053184c47b/barbican-worker-log/0.log" Nov 21 20:49:06 crc kubenswrapper[4701]: I1121 20:49:06.437082 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-cbvnd_d8e201f8-f7ea-4bd5-8ba1-f0d85ca9d3d2/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 20:49:06 crc kubenswrapper[4701]: I1121 20:49:06.681499 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_7446e023-4eae-4738-ab4a-4ddf024cd980/ceilometer-central-agent/0.log" Nov 21 20:49:06 crc kubenswrapper[4701]: I1121 20:49:06.720867 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_7446e023-4eae-4738-ab4a-4ddf024cd980/proxy-httpd/0.log" Nov 21 20:49:06 crc kubenswrapper[4701]: I1121 20:49:06.722614 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_7446e023-4eae-4738-ab4a-4ddf024cd980/ceilometer-notification-agent/0.log" Nov 21 20:49:06 crc kubenswrapper[4701]: I1121 20:49:06.741740 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_7446e023-4eae-4738-ab4a-4ddf024cd980/sg-core/0.log" Nov 21 20:49:07 crc 
kubenswrapper[4701]: I1121 20:49:07.023308 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f/cinder-api-log/0.log" Nov 21 20:49:07 crc kubenswrapper[4701]: I1121 20:49:07.335081 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_9c177319-2b64-45dd-a3df-47ccaca79da4/probe/0.log" Nov 21 20:49:07 crc kubenswrapper[4701]: I1121 20:49:07.677554 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_4b72befc-936a-4833-8e7a-f765c655a300/cinder-scheduler/0.log" Nov 21 20:49:07 crc kubenswrapper[4701]: I1121 20:49:07.694352 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_9c177319-2b64-45dd-a3df-47ccaca79da4/cinder-backup/0.log" Nov 21 20:49:07 crc kubenswrapper[4701]: I1121 20:49:07.783924 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_4b72befc-936a-4833-8e7a-f765c655a300/probe/0.log" Nov 21 20:49:07 crc kubenswrapper[4701]: I1121 20:49:07.858757 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_83a8cd4f-9aac-4e7b-81f4-ba9107a35a8f/cinder-api/0.log" Nov 21 20:49:08 crc kubenswrapper[4701]: I1121 20:49:08.045954 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-nfs-0_fbd75a47-d6fb-407b-bf7a-9750b745b820/probe/0.log" Nov 21 20:49:08 crc kubenswrapper[4701]: I1121 20:49:08.201064 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-nfs-0_fbd75a47-d6fb-407b-bf7a-9750b745b820/cinder-volume/0.log" Nov 21 20:49:08 crc kubenswrapper[4701]: I1121 20:49:08.362256 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-nfs-2-0_c2b29617-ccfa-4b0c-be12-52e9e7e06c33/probe/0.log" Nov 21 20:49:08 crc kubenswrapper[4701]: I1121 20:49:08.510173 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-t25h9_ae68b914-c2d3-4df9-bd3c-563524bb9ded/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 20:49:08 crc kubenswrapper[4701]: I1121 20:49:08.548787 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-nfs-2-0_c2b29617-ccfa-4b0c-be12-52e9e7e06c33/cinder-volume/0.log" Nov 21 20:49:08 crc kubenswrapper[4701]: I1121 20:49:08.726043 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-lcdx6_e275d37d-b55f-433f-b4be-cfba6b7b158e/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 20:49:08 crc kubenswrapper[4701]: I1121 20:49:08.793091 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5894fd8d75-7xcgx_7a22d8c3-ddf4-4901-b1c6-39a9099d1de6/init/0.log" Nov 21 20:49:09 crc kubenswrapper[4701]: I1121 20:49:09.106528 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5894fd8d75-7xcgx_7a22d8c3-ddf4-4901-b1c6-39a9099d1de6/init/0.log" Nov 21 20:49:09 crc kubenswrapper[4701]: I1121 20:49:09.139803 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-vj8rp_775c15c9-3c73-4e78-ad8e-b02163afc9f2/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 20:49:09 crc kubenswrapper[4701]: I1121 20:49:09.311683 4701 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_dnsmasq-dns-5894fd8d75-7xcgx_7a22d8c3-ddf4-4901-b1c6-39a9099d1de6/dnsmasq-dns/0.log" Nov 21 20:49:09 crc kubenswrapper[4701]: I1121 20:49:09.415125 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_b405de0f-6523-4d69-b8a7-a73528f0df37/glance-httpd/0.log" Nov 21 20:49:09 crc kubenswrapper[4701]: I1121 20:49:09.432434 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_b405de0f-6523-4d69-b8a7-a73528f0df37/glance-log/0.log" Nov 21 20:49:09 crc kubenswrapper[4701]: I1121 20:49:09.565365 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_50767206-6e50-48ab-ab5f-2eee90151470/glance-httpd/0.log" Nov 21 20:49:09 crc kubenswrapper[4701]: I1121 20:49:09.621061 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_50767206-6e50-48ab-ab5f-2eee90151470/glance-log/0.log" Nov 21 20:49:09 crc kubenswrapper[4701]: I1121 20:49:09.798779 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-6c68b8ff68-tfcgs_7d8b1846-dcd5-49b4-8eb2-74b0462538e1/horizon/0.log" Nov 21 20:49:09 crc kubenswrapper[4701]: I1121 20:49:09.948483 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-rgx2b_b4e65662-463f-4f48-b668-1ad55aaeb9fe/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 20:49:10 crc kubenswrapper[4701]: I1121 20:49:10.073365 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-wtbcx_40c71638-2add-4f3c-acc9-cc971cad107e/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 20:49:10 crc kubenswrapper[4701]: I1121 20:49:10.366033 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29395921-cv2wk_4039a132-b77c-4449-baa0-c79e6940472f/keystone-cron/0.log" Nov 21 20:49:10 crc kubenswrapper[4701]: I1121 20:49:10.603345 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_2ebd86aa-d8dc-4f48-b9a4-c6445bdb71ad/kube-state-metrics/0.log" Nov 21 20:49:10 crc kubenswrapper[4701]: I1121 20:49:10.911105 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-fls89_17c2df3e-2efb-4d8f-9e8a-ebecac6fb0bc/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 20:49:11 crc kubenswrapper[4701]: I1121 20:49:11.052840 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-6c68b8ff68-tfcgs_7d8b1846-dcd5-49b4-8eb2-74b0462538e1/horizon-log/0.log" Nov 21 20:49:11 crc kubenswrapper[4701]: I1121 20:49:11.062159 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-65d76b5c54-c9d89_dee81498-90dd-46c0-949f-c3de3b9bfbd3/keystone-api/0.log" Nov 21 20:49:11 crc kubenswrapper[4701]: I1121 20:49:11.385511 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-7qnj5_44f05c88-6707-4ca9-a248-d5abc8ae5850/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 20:49:11 crc kubenswrapper[4701]: I1121 20:49:11.545532 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5b76b98545-tv4h2_1fc0c9dc-fc55-43fb-a2bb-727c01863fb5/neutron-api/0.log" Nov 21 20:49:11 crc kubenswrapper[4701]: I1121 20:49:11.592673 4701 log.go:25] "Finished parsing 
log file" path="/var/log/pods/openstack_neutron-5b76b98545-tv4h2_1fc0c9dc-fc55-43fb-a2bb-727c01863fb5/neutron-httpd/0.log" Nov 21 20:49:12 crc kubenswrapper[4701]: I1121 20:49:12.302683 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_6ff3d334-08d1-49a9-8483-48402c600ec2/nova-cell0-conductor-conductor/0.log" Nov 21 20:49:12 crc kubenswrapper[4701]: I1121 20:49:12.583785 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_beda6918-af8f-42ab-8599-44f3dc52229f/nova-cell1-conductor-conductor/0.log" Nov 21 20:49:13 crc kubenswrapper[4701]: I1121 20:49:13.053609 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_43fdb14d-22e9-469a-b6cb-00477daa5ece/nova-cell1-novncproxy-novncproxy/0.log" Nov 21 20:49:13 crc kubenswrapper[4701]: I1121 20:49:13.179588 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-rhwkh_a50527e7-3b38-471d-a03d-937e88e019f3/nova-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 20:49:13 crc kubenswrapper[4701]: I1121 20:49:13.560726 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_3e2f4186-103f-4356-8b8a-80a07cde4ac4/nova-metadata-log/0.log" Nov 21 20:49:13 crc kubenswrapper[4701]: I1121 20:49:13.696493 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_7dfe7de3-4ade-4a2e-8826-be286e416d33/nova-api-log/0.log" Nov 21 20:49:14 crc kubenswrapper[4701]: I1121 20:49:14.297056 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_b6432247-ed58-4dce-98d4-4267d0122151/mysql-bootstrap/0.log" Nov 21 20:49:14 crc kubenswrapper[4701]: I1121 20:49:14.371775 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_7dfe7de3-4ade-4a2e-8826-be286e416d33/nova-api-api/0.log" Nov 21 20:49:14 crc kubenswrapper[4701]: I1121 20:49:14.417120 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_3df472cf-6795-4ca8-908b-01f824bf4b5e/nova-scheduler-scheduler/0.log" Nov 21 20:49:14 crc kubenswrapper[4701]: I1121 20:49:14.519522 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_b6432247-ed58-4dce-98d4-4267d0122151/mysql-bootstrap/0.log" Nov 21 20:49:14 crc kubenswrapper[4701]: I1121 20:49:14.628286 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_b6432247-ed58-4dce-98d4-4267d0122151/galera/0.log" Nov 21 20:49:14 crc kubenswrapper[4701]: I1121 20:49:14.769377 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_117bcee4-5190-4738-8e03-19f77f4fb428/mysql-bootstrap/0.log" Nov 21 20:49:14 crc kubenswrapper[4701]: I1121 20:49:14.991174 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_117bcee4-5190-4738-8e03-19f77f4fb428/mysql-bootstrap/0.log" Nov 21 20:49:15 crc kubenswrapper[4701]: I1121 20:49:15.048717 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_117bcee4-5190-4738-8e03-19f77f4fb428/galera/0.log" Nov 21 20:49:15 crc kubenswrapper[4701]: I1121 20:49:15.209255 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_3cabfe57-5d37-4a59-93e3-aac4836f7d2c/openstackclient/0.log" Nov 21 20:49:15 crc kubenswrapper[4701]: I1121 20:49:15.379966 4701 log.go:25] "Finished parsing 
log file" path="/var/log/pods/openstack_ovn-controller-49p6k_d6bce0ec-3045-405e-914b-f466321dc7ea/ovn-controller/0.log" Nov 21 20:49:15 crc kubenswrapper[4701]: I1121 20:49:15.508470 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-msx5f_81c7a23e-51f0-4360-820b-6f4f7b7daa63/openstack-network-exporter/0.log" Nov 21 20:49:15 crc kubenswrapper[4701]: I1121 20:49:15.720321 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-vqwr8_c1552bca-042c-4d9e-ac6f-8c8f762ac494/ovsdb-server-init/0.log" Nov 21 20:49:15 crc kubenswrapper[4701]: I1121 20:49:15.866050 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-vqwr8_c1552bca-042c-4d9e-ac6f-8c8f762ac494/ovsdb-server-init/0.log" Nov 21 20:49:15 crc kubenswrapper[4701]: I1121 20:49:15.948733 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-vqwr8_c1552bca-042c-4d9e-ac6f-8c8f762ac494/ovsdb-server/0.log" Nov 21 20:49:16 crc kubenswrapper[4701]: I1121 20:49:16.236188 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-rdjpc_640fcd44-4a2e-475b-b296-5f37ac6d55e7/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 20:49:16 crc kubenswrapper[4701]: I1121 20:49:16.405501 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-vqwr8_c1552bca-042c-4d9e-ac6f-8c8f762ac494/ovs-vswitchd/0.log" Nov 21 20:49:16 crc kubenswrapper[4701]: I1121 20:49:16.445786 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_4b75b7e0-a14e-4889-9430-7cbb446d48d9/openstack-network-exporter/0.log" Nov 21 20:49:16 crc kubenswrapper[4701]: I1121 20:49:16.689671 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_76796a80-e8f7-43ed-862b-011b964a31f9/openstack-network-exporter/0.log" Nov 21 20:49:16 crc kubenswrapper[4701]: I1121 20:49:16.691919 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_4b75b7e0-a14e-4889-9430-7cbb446d48d9/ovn-northd/0.log" Nov 21 20:49:16 crc kubenswrapper[4701]: I1121 20:49:16.880574 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_3e2f4186-103f-4356-8b8a-80a07cde4ac4/nova-metadata-metadata/0.log" Nov 21 20:49:16 crc kubenswrapper[4701]: I1121 20:49:16.961715 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_fae8c560-c6a6-453e-8c64-9dca8183e5c0/openstack-network-exporter/0.log" Nov 21 20:49:16 crc kubenswrapper[4701]: I1121 20:49:16.999187 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_76796a80-e8f7-43ed-862b-011b964a31f9/ovsdbserver-nb/0.log" Nov 21 20:49:17 crc kubenswrapper[4701]: I1121 20:49:17.125049 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_fae8c560-c6a6-453e-8c64-9dca8183e5c0/ovsdbserver-sb/0.log" Nov 21 20:49:17 crc kubenswrapper[4701]: I1121 20:49:17.431370 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_71c6d659-75fd-4221-8b7e-1496221311fe/init-config-reloader/0.log" Nov 21 20:49:17 crc kubenswrapper[4701]: I1121 20:49:17.515496 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-78b94b5b48-685pj_c5db43f2-147c-4625-9f3a-9f68cc6afa8c/placement-api/0.log" Nov 21 20:49:17 crc kubenswrapper[4701]: I1121 20:49:17.589045 4701 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-78b94b5b48-685pj_c5db43f2-147c-4625-9f3a-9f68cc6afa8c/placement-log/0.log" Nov 21 20:49:17 crc kubenswrapper[4701]: I1121 20:49:17.751486 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_71c6d659-75fd-4221-8b7e-1496221311fe/init-config-reloader/0.log" Nov 21 20:49:17 crc kubenswrapper[4701]: I1121 20:49:17.763134 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_71c6d659-75fd-4221-8b7e-1496221311fe/config-reloader/0.log" Nov 21 20:49:17 crc kubenswrapper[4701]: I1121 20:49:17.772480 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_71c6d659-75fd-4221-8b7e-1496221311fe/prometheus/0.log" Nov 21 20:49:17 crc kubenswrapper[4701]: I1121 20:49:17.905597 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_71c6d659-75fd-4221-8b7e-1496221311fe/thanos-sidecar/0.log" Nov 21 20:49:18 crc kubenswrapper[4701]: I1121 20:49:18.005342 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_c53b35a3-36ed-43a5-a400-4658b9408596/setup-container/0.log" Nov 21 20:49:18 crc kubenswrapper[4701]: I1121 20:49:18.247368 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_c53b35a3-36ed-43a5-a400-4658b9408596/rabbitmq/0.log" Nov 21 20:49:18 crc kubenswrapper[4701]: I1121 20:49:18.262664 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_c53b35a3-36ed-43a5-a400-4658b9408596/setup-container/0.log" Nov 21 20:49:18 crc kubenswrapper[4701]: I1121 20:49:18.268972 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-notifications-server-0_fa567817-ce17-4cb3-9e55-e14902a96420/setup-container/0.log" Nov 21 20:49:18 crc kubenswrapper[4701]: I1121 20:49:18.539404 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-notifications-server-0_fa567817-ce17-4cb3-9e55-e14902a96420/setup-container/0.log" Nov 21 20:49:18 crc kubenswrapper[4701]: I1121 20:49:18.593739 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-notifications-server-0_fa567817-ce17-4cb3-9e55-e14902a96420/rabbitmq/0.log" Nov 21 20:49:18 crc kubenswrapper[4701]: I1121 20:49:18.613501 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 20:49:18 crc kubenswrapper[4701]: I1121 20:49:18.613563 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 20:49:18 crc kubenswrapper[4701]: I1121 20:49:18.654768 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_fcd41199-949d-4c9f-9154-f83acb9bb997/setup-container/0.log" Nov 21 20:49:18 crc kubenswrapper[4701]: I1121 20:49:18.892054 4701 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_rabbitmq-server-0_fcd41199-949d-4c9f-9154-f83acb9bb997/setup-container/0.log" Nov 21 20:49:18 crc kubenswrapper[4701]: I1121 20:49:18.960269 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_fcd41199-949d-4c9f-9154-f83acb9bb997/rabbitmq/0.log" Nov 21 20:49:19 crc kubenswrapper[4701]: I1121 20:49:19.019341 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-kj7pb_bcdfb82f-4b6c-44ca-b282-1f803082c73d/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 20:49:19 crc kubenswrapper[4701]: I1121 20:49:19.196130 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-8vgkh_c3df9720-470f-4076-93ad-cd09d2b8c1d4/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 20:49:19 crc kubenswrapper[4701]: I1121 20:49:19.570811 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-6nz7n_aa6fec42-0fdb-4b30-80b9-7cea4579dd05/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 20:49:19 crc kubenswrapper[4701]: I1121 20:49:19.644033 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-p5jn2_ca5a5ac3-7a81-42f3-8a15-5fce2f096bd1/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 20:49:19 crc kubenswrapper[4701]: I1121 20:49:19.862575 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-mxn98_53533f20-cd97-4dfe-a00c-e5f0e6f86403/ssh-known-hosts-edpm-deployment/0.log" Nov 21 20:49:20 crc kubenswrapper[4701]: I1121 20:49:20.066568 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-66cbbc6b59-4jhxd_567ed826-1db0-4018-b4ea-8af42596aa3e/proxy-server/0.log" Nov 21 20:49:20 crc kubenswrapper[4701]: I1121 20:49:20.161348 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-qnhkr_6e6a9aaa-b68d-4490-b0f3-975c6ace3d1a/swift-ring-rebalance/0.log" Nov 21 20:49:20 crc kubenswrapper[4701]: I1121 20:49:20.303593 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-66cbbc6b59-4jhxd_567ed826-1db0-4018-b4ea-8af42596aa3e/proxy-httpd/0.log" Nov 21 20:49:20 crc kubenswrapper[4701]: I1121 20:49:20.414153 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_bf8d5d78-fa29-41ff-94e0-6249f7e02e1b/account-auditor/0.log" Nov 21 20:49:20 crc kubenswrapper[4701]: I1121 20:49:20.462561 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_bf8d5d78-fa29-41ff-94e0-6249f7e02e1b/account-reaper/0.log" Nov 21 20:49:20 crc kubenswrapper[4701]: I1121 20:49:20.633444 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_bf8d5d78-fa29-41ff-94e0-6249f7e02e1b/account-server/0.log" Nov 21 20:49:20 crc kubenswrapper[4701]: I1121 20:49:20.636598 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_bf8d5d78-fa29-41ff-94e0-6249f7e02e1b/container-auditor/0.log" Nov 21 20:49:20 crc kubenswrapper[4701]: I1121 20:49:20.691744 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_bf8d5d78-fa29-41ff-94e0-6249f7e02e1b/account-replicator/0.log" Nov 21 20:49:20 crc kubenswrapper[4701]: I1121 20:49:20.780560 4701 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_swift-storage-0_bf8d5d78-fa29-41ff-94e0-6249f7e02e1b/container-replicator/0.log" Nov 21 20:49:20 crc kubenswrapper[4701]: I1121 20:49:20.845468 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_bf8d5d78-fa29-41ff-94e0-6249f7e02e1b/container-server/0.log" Nov 21 20:49:20 crc kubenswrapper[4701]: I1121 20:49:20.886244 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_bf8d5d78-fa29-41ff-94e0-6249f7e02e1b/container-updater/0.log" Nov 21 20:49:21 crc kubenswrapper[4701]: I1121 20:49:21.016183 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_bf8d5d78-fa29-41ff-94e0-6249f7e02e1b/object-auditor/0.log" Nov 21 20:49:21 crc kubenswrapper[4701]: I1121 20:49:21.016763 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_bf8d5d78-fa29-41ff-94e0-6249f7e02e1b/object-expirer/0.log" Nov 21 20:49:21 crc kubenswrapper[4701]: I1121 20:49:21.129430 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_bf8d5d78-fa29-41ff-94e0-6249f7e02e1b/object-replicator/0.log" Nov 21 20:49:21 crc kubenswrapper[4701]: I1121 20:49:21.129861 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_bf8d5d78-fa29-41ff-94e0-6249f7e02e1b/object-server/0.log" Nov 21 20:49:21 crc kubenswrapper[4701]: I1121 20:49:21.229905 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_bf8d5d78-fa29-41ff-94e0-6249f7e02e1b/object-updater/0.log" Nov 21 20:49:21 crc kubenswrapper[4701]: I1121 20:49:21.286333 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_bf8d5d78-fa29-41ff-94e0-6249f7e02e1b/rsync/0.log" Nov 21 20:49:21 crc kubenswrapper[4701]: I1121 20:49:21.384895 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_bf8d5d78-fa29-41ff-94e0-6249f7e02e1b/swift-recon-cron/0.log" Nov 21 20:49:21 crc kubenswrapper[4701]: I1121 20:49:21.537467 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-97f7r_4f066a0d-46d8-4cfd-b188-495f77c256f1/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 20:49:21 crc kubenswrapper[4701]: I1121 20:49:21.672974 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_6dd5f296-841e-4527-88fe-3963fef0e450/tempest-tests-tempest-tests-runner/0.log" Nov 21 20:49:21 crc kubenswrapper[4701]: I1121 20:49:21.848875 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_e641415e-cd44-42d3-b2be-d1b45a79297a/test-operator-logs-container/0.log" Nov 21 20:49:21 crc kubenswrapper[4701]: I1121 20:49:21.948254 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-rztzd_2576d57b-b7fc-4d3e-b4a5-e72f1d5ea80a/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 20:49:23 crc kubenswrapper[4701]: I1121 20:49:23.130212 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-applier-0_2bbe25c3-cbc9-45d6-aabe-a9b8e69d044f/watcher-applier/0.log" Nov 21 20:49:23 crc kubenswrapper[4701]: I1121 20:49:23.555756 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-api-0_b909f8f0-603a-420b-8b12-2b15b6c0900e/watcher-api-log/0.log" Nov 21 20:49:27 crc 
kubenswrapper[4701]: I1121 20:49:27.092663 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-decision-engine-0_87723852-e421-4a28-a9ce-90390eb3b7a8/watcher-decision-engine/0.log" Nov 21 20:49:28 crc kubenswrapper[4701]: I1121 20:49:28.732156 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-api-0_b909f8f0-603a-420b-8b12-2b15b6c0900e/watcher-api/0.log" Nov 21 20:49:40 crc kubenswrapper[4701]: I1121 20:49:40.592855 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_2d891f55-8791-487d-b8f9-b4183da3e720/memcached/0.log" Nov 21 20:49:48 crc kubenswrapper[4701]: I1121 20:49:48.614081 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 20:49:48 crc kubenswrapper[4701]: I1121 20:49:48.615095 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 20:49:57 crc kubenswrapper[4701]: I1121 20:49:57.488173 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98_c35200a2-6f14-4b98-b227-d93f103b9d76/util/0.log" Nov 21 20:49:57 crc kubenswrapper[4701]: I1121 20:49:57.668067 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98_c35200a2-6f14-4b98-b227-d93f103b9d76/util/0.log" Nov 21 20:49:57 crc kubenswrapper[4701]: I1121 20:49:57.686105 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98_c35200a2-6f14-4b98-b227-d93f103b9d76/pull/0.log" Nov 21 20:49:57 crc kubenswrapper[4701]: I1121 20:49:57.689677 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98_c35200a2-6f14-4b98-b227-d93f103b9d76/pull/0.log" Nov 21 20:49:57 crc kubenswrapper[4701]: I1121 20:49:57.884592 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98_c35200a2-6f14-4b98-b227-d93f103b9d76/util/0.log" Nov 21 20:49:57 crc kubenswrapper[4701]: I1121 20:49:57.937489 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98_c35200a2-6f14-4b98-b227-d93f103b9d76/extract/0.log" Nov 21 20:49:57 crc kubenswrapper[4701]: I1121 20:49:57.939664 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_73a87b76858e92fe151b091de3edb46707149b621ebba4ced1a81819d1ctt98_c35200a2-6f14-4b98-b227-d93f103b9d76/pull/0.log" Nov 21 20:49:58 crc kubenswrapper[4701]: I1121 20:49:58.120294 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-75fb479bcc-lgvh6_4c5eabdd-f4f8-4180-be28-707592f6d24d/kube-rbac-proxy/0.log" Nov 21 20:49:58 crc kubenswrapper[4701]: I1121 20:49:58.265112 4701 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-75fb479bcc-lgvh6_4c5eabdd-f4f8-4180-be28-707592f6d24d/manager/0.log" Nov 21 20:49:58 crc kubenswrapper[4701]: I1121 20:49:58.268316 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6498cbf48f-2ccc7_566d8e82-b230-492d-a47b-80d2351b169e/kube-rbac-proxy/0.log" Nov 21 20:49:58 crc kubenswrapper[4701]: I1121 20:49:58.391236 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6498cbf48f-2ccc7_566d8e82-b230-492d-a47b-80d2351b169e/manager/0.log" Nov 21 20:49:58 crc kubenswrapper[4701]: I1121 20:49:58.499362 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-767ccfd65f-qmdtp_87969819-3a91-4333-9585-72a2a27fa6c9/kube-rbac-proxy/0.log" Nov 21 20:49:58 crc kubenswrapper[4701]: I1121 20:49:58.529246 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-767ccfd65f-qmdtp_87969819-3a91-4333-9585-72a2a27fa6c9/manager/0.log" Nov 21 20:49:58 crc kubenswrapper[4701]: I1121 20:49:58.683544 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-7969689c84-t6hz7_0c6d96e4-2798-4525-bcec-61ad137140d8/kube-rbac-proxy/0.log" Nov 21 20:49:58 crc kubenswrapper[4701]: I1121 20:49:58.769742 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-7969689c84-t6hz7_0c6d96e4-2798-4525-bcec-61ad137140d8/manager/0.log" Nov 21 20:49:58 crc kubenswrapper[4701]: I1121 20:49:58.874792 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-56f54d6746-c2mhg_ef203e45-f1b1-4a9a-9987-66bb33655a95/kube-rbac-proxy/0.log" Nov 21 20:49:59 crc kubenswrapper[4701]: I1121 20:49:59.017396 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-56f54d6746-c2mhg_ef203e45-f1b1-4a9a-9987-66bb33655a95/manager/0.log" Nov 21 20:49:59 crc kubenswrapper[4701]: I1121 20:49:59.060391 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-598f69df5d-kkz2m_a76b2214-2c16-4b55-bf3d-c7bdf1019237/kube-rbac-proxy/0.log" Nov 21 20:49:59 crc kubenswrapper[4701]: I1121 20:49:59.155117 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-598f69df5d-kkz2m_a76b2214-2c16-4b55-bf3d-c7bdf1019237/manager/0.log" Nov 21 20:49:59 crc kubenswrapper[4701]: I1121 20:49:59.271206 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-6dd8864d7c-vbqvb_b15963ff-1822-4079-8cce-266b05a9ac47/kube-rbac-proxy/0.log" Nov 21 20:49:59 crc kubenswrapper[4701]: I1121 20:49:59.492003 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-99b499f4-gg7tq_c7b87a42-0af4-4484-845e-f2993960537c/kube-rbac-proxy/0.log" Nov 21 20:49:59 crc kubenswrapper[4701]: I1121 20:49:59.511653 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-6dd8864d7c-vbqvb_b15963ff-1822-4079-8cce-266b05a9ac47/manager/0.log" Nov 21 20:49:59 crc kubenswrapper[4701]: I1121 20:49:59.559091 4701 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-99b499f4-gg7tq_c7b87a42-0af4-4484-845e-f2993960537c/manager/0.log" Nov 21 20:49:59 crc kubenswrapper[4701]: I1121 20:49:59.685914 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7454b96578-hz5sk_61444bc1-a24a-4c29-94b8-953ae2dc8621/kube-rbac-proxy/0.log" Nov 21 20:49:59 crc kubenswrapper[4701]: I1121 20:49:59.797321 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7454b96578-hz5sk_61444bc1-a24a-4c29-94b8-953ae2dc8621/manager/0.log" Nov 21 20:49:59 crc kubenswrapper[4701]: I1121 20:49:59.897329 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58f887965d-467lr_bcf3ee80-4bca-445a-84aa-ef30d99b7b9a/kube-rbac-proxy/0.log" Nov 21 20:49:59 crc kubenswrapper[4701]: I1121 20:49:59.962592 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58f887965d-467lr_bcf3ee80-4bca-445a-84aa-ef30d99b7b9a/manager/0.log" Nov 21 20:50:00 crc kubenswrapper[4701]: I1121 20:50:00.044078 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-54b5986bb8-rlq95_1440b54d-d3f5-46a9-b335-27a6d2031d24/kube-rbac-proxy/0.log" Nov 21 20:50:00 crc kubenswrapper[4701]: I1121 20:50:00.115393 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-54b5986bb8-rlq95_1440b54d-d3f5-46a9-b335-27a6d2031d24/manager/0.log" Nov 21 20:50:00 crc kubenswrapper[4701]: I1121 20:50:00.241572 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-78bd47f458-rzvnf_8c7ae04c-6e93-4c37-b1e5-8bbcbe9ffa2d/kube-rbac-proxy/0.log" Nov 21 20:50:00 crc kubenswrapper[4701]: I1121 20:50:00.275719 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-78bd47f458-rzvnf_8c7ae04c-6e93-4c37-b1e5-8bbcbe9ffa2d/manager/0.log" Nov 21 20:50:00 crc kubenswrapper[4701]: I1121 20:50:00.396825 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-cfbb9c588-zrwsd_21028817-64c6-4a7c-8427-8ee3db1dec7b/kube-rbac-proxy/0.log" Nov 21 20:50:00 crc kubenswrapper[4701]: I1121 20:50:00.530686 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-cfbb9c588-zrwsd_21028817-64c6-4a7c-8427-8ee3db1dec7b/manager/0.log" Nov 21 20:50:00 crc kubenswrapper[4701]: I1121 20:50:00.624676 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-54cfbf4c7d-qqmcw_015395c6-297a-4a90-a5fd-49dcdde237af/manager/0.log" Nov 21 20:50:00 crc kubenswrapper[4701]: I1121 20:50:00.637329 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-54cfbf4c7d-qqmcw_015395c6-297a-4a90-a5fd-49dcdde237af/kube-rbac-proxy/0.log" Nov 21 20:50:00 crc kubenswrapper[4701]: I1121 20:50:00.768930 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-8c7444f48-c58b4_0439f0bf-0ea9-4553-a53c-74f87b31a6a7/kube-rbac-proxy/0.log" Nov 21 20:50:00 crc kubenswrapper[4701]: I1121 
20:50:00.834256 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-8c7444f48-c58b4_0439f0bf-0ea9-4553-a53c-74f87b31a6a7/manager/0.log" Nov 21 20:50:00 crc kubenswrapper[4701]: I1121 20:50:00.982562 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-7467d8c866-fgkj9_743fbd83-3b42-4083-b06f-ae81d6294066/kube-rbac-proxy/0.log" Nov 21 20:50:01 crc kubenswrapper[4701]: I1121 20:50:01.130418 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-6f8fb57dc8-vmnj6_fc60458d-83dd-4a11-b22d-6a8a7f5f01f6/kube-rbac-proxy/0.log" Nov 21 20:50:01 crc kubenswrapper[4701]: I1121 20:50:01.324177 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-hm4sz_42334fc1-ad97-4595-bb9b-4c7f736391e4/registry-server/0.log" Nov 21 20:50:01 crc kubenswrapper[4701]: I1121 20:50:01.348803 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-6f8fb57dc8-vmnj6_fc60458d-83dd-4a11-b22d-6a8a7f5f01f6/operator/0.log" Nov 21 20:50:01 crc kubenswrapper[4701]: I1121 20:50:01.564232 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-54fc5f65b7-vmsdw_9a58290e-d37e-4094-8ed8-4ed701c1292c/kube-rbac-proxy/0.log" Nov 21 20:50:01 crc kubenswrapper[4701]: I1121 20:50:01.661620 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-54fc5f65b7-vmsdw_9a58290e-d37e-4094-8ed8-4ed701c1292c/manager/0.log" Nov 21 20:50:01 crc kubenswrapper[4701]: I1121 20:50:01.742271 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5b797b8dff-96954_e2fc7504-afe1-4197-a366-c765c52366b0/kube-rbac-proxy/0.log" Nov 21 20:50:01 crc kubenswrapper[4701]: I1121 20:50:01.858252 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5b797b8dff-96954_e2fc7504-afe1-4197-a366-c765c52366b0/manager/0.log" Nov 21 20:50:02 crc kubenswrapper[4701]: I1121 20:50:02.008465 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-5f97d8c699-knf78_82428420-1129-4ce6-a969-7d54bb2f0d52/operator/0.log" Nov 21 20:50:02 crc kubenswrapper[4701]: I1121 20:50:02.134116 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d656998f4-pwg6n_565f6d5b-92e9-4fc5-9c4b-9c06b8946754/kube-rbac-proxy/0.log" Nov 21 20:50:02 crc kubenswrapper[4701]: I1121 20:50:02.284798 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d656998f4-pwg6n_565f6d5b-92e9-4fc5-9c4b-9c06b8946754/manager/0.log" Nov 21 20:50:02 crc kubenswrapper[4701]: I1121 20:50:02.333646 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-6d4bf84b58-6d2f7_66d77e65-ca72-473d-9697-9168a951b0c9/kube-rbac-proxy/0.log" Nov 21 20:50:02 crc kubenswrapper[4701]: I1121 20:50:02.393671 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-7467d8c866-fgkj9_743fbd83-3b42-4083-b06f-ae81d6294066/manager/0.log" Nov 21 20:50:02 crc 
kubenswrapper[4701]: I1121 20:50:02.561065 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-b4c496f69-9rtf7_1816b847-d41a-400a-bb1d-4f7551cfd581/manager/0.log" Nov 21 20:50:02 crc kubenswrapper[4701]: I1121 20:50:02.564303 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-b4c496f69-9rtf7_1816b847-d41a-400a-bb1d-4f7551cfd581/kube-rbac-proxy/0.log" Nov 21 20:50:02 crc kubenswrapper[4701]: I1121 20:50:02.582850 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-6d4bf84b58-6d2f7_66d77e65-ca72-473d-9697-9168a951b0c9/manager/0.log" Nov 21 20:50:02 crc kubenswrapper[4701]: I1121 20:50:02.742855 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-5c984db885-xjww4_b571034c-9574-4a93-80e9-abbf663e6ac3/kube-rbac-proxy/0.log" Nov 21 20:50:02 crc kubenswrapper[4701]: I1121 20:50:02.834350 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-5c984db885-xjww4_b571034c-9574-4a93-80e9-abbf663e6ac3/manager/0.log" Nov 21 20:50:18 crc kubenswrapper[4701]: I1121 20:50:18.613292 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 20:50:18 crc kubenswrapper[4701]: I1121 20:50:18.614042 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 20:50:18 crc kubenswrapper[4701]: I1121 20:50:18.614118 4701 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" Nov 21 20:50:18 crc kubenswrapper[4701]: I1121 20:50:18.615451 4701 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3d797f0000302550ad2e1909fd94b82c08f3845841d76d73d89e762960062c62"} pod="openshift-machine-config-operator/machine-config-daemon-tbszf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 20:50:18 crc kubenswrapper[4701]: I1121 20:50:18.615565 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" containerID="cri-o://3d797f0000302550ad2e1909fd94b82c08f3845841d76d73d89e762960062c62" gracePeriod=600 Nov 21 20:50:18 crc kubenswrapper[4701]: E1121 20:50:18.747551 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:50:19 crc 
kubenswrapper[4701]: I1121 20:50:19.272425 4701 generic.go:334] "Generic (PLEG): container finished" podID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerID="3d797f0000302550ad2e1909fd94b82c08f3845841d76d73d89e762960062c62" exitCode=0 Nov 21 20:50:19 crc kubenswrapper[4701]: I1121 20:50:19.272509 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerDied","Data":"3d797f0000302550ad2e1909fd94b82c08f3845841d76d73d89e762960062c62"} Nov 21 20:50:19 crc kubenswrapper[4701]: I1121 20:50:19.272986 4701 scope.go:117] "RemoveContainer" containerID="61ed04b164680d4d9f55d5d2bc2dda8c52790bbd62e7733688bf5c8ae3f7d69f" Nov 21 20:50:19 crc kubenswrapper[4701]: I1121 20:50:19.273905 4701 scope.go:117] "RemoveContainer" containerID="3d797f0000302550ad2e1909fd94b82c08f3845841d76d73d89e762960062c62" Nov 21 20:50:19 crc kubenswrapper[4701]: E1121 20:50:19.274166 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:50:21 crc kubenswrapper[4701]: I1121 20:50:21.556068 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-l7w9b_6469e01b-cfc6-4ec9-87de-29c6eeee136f/control-plane-machine-set-operator/0.log" Nov 21 20:50:21 crc kubenswrapper[4701]: I1121 20:50:21.708633 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-z6z69_f741f928-61fd-41d5-b8c8-879a4744fa2e/kube-rbac-proxy/0.log" Nov 21 20:50:21 crc kubenswrapper[4701]: I1121 20:50:21.795658 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-z6z69_f741f928-61fd-41d5-b8c8-879a4744fa2e/machine-api-operator/0.log" Nov 21 20:50:29 crc kubenswrapper[4701]: I1121 20:50:29.965647 4701 scope.go:117] "RemoveContainer" containerID="3d797f0000302550ad2e1909fd94b82c08f3845841d76d73d89e762960062c62" Nov 21 20:50:29 crc kubenswrapper[4701]: E1121 20:50:29.968158 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:50:38 crc kubenswrapper[4701]: I1121 20:50:38.420819 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-868vc_0da0430e-e5cb-465f-8e96-49906f8c0965/cert-manager-controller/0.log" Nov 21 20:50:38 crc kubenswrapper[4701]: I1121 20:50:38.612522 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-2qrjs_325ae061-87b1-4272-a2d5-29a4fcf689f2/cert-manager-cainjector/0.log" Nov 21 20:50:38 crc kubenswrapper[4701]: I1121 20:50:38.721319 4701 log.go:25] "Finished parsing log file" 
path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-z4fmm_e8e88792-2751-4fcc-b8b0-dd03328e12b8/cert-manager-webhook/0.log" Nov 21 20:50:44 crc kubenswrapper[4701]: I1121 20:50:44.951906 4701 scope.go:117] "RemoveContainer" containerID="3d797f0000302550ad2e1909fd94b82c08f3845841d76d73d89e762960062c62" Nov 21 20:50:44 crc kubenswrapper[4701]: E1121 20:50:44.952991 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:50:53 crc kubenswrapper[4701]: I1121 20:50:53.822702 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5874bd7bc5-mxxhh_cc6e0ff9-3b8f-403f-9f52-52808c29059d/nmstate-console-plugin/0.log" Nov 21 20:50:53 crc kubenswrapper[4701]: I1121 20:50:53.978314 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-7sdtf_ff85cf8f-c850-4455-92b8-c7bc1c548e68/nmstate-handler/0.log" Nov 21 20:50:54 crc kubenswrapper[4701]: I1121 20:50:54.047234 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-p7n5j_51b03b0f-062b-45d4-95b9-f965e2b69d80/kube-rbac-proxy/0.log" Nov 21 20:50:54 crc kubenswrapper[4701]: I1121 20:50:54.055177 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-p7n5j_51b03b0f-062b-45d4-95b9-f965e2b69d80/nmstate-metrics/0.log" Nov 21 20:50:54 crc kubenswrapper[4701]: I1121 20:50:54.214995 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-557fdffb88-6fdlz_22701628-3f03-4106-ad9e-1b727e2b7c08/nmstate-operator/0.log" Nov 21 20:50:54 crc kubenswrapper[4701]: I1121 20:50:54.255374 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-pthql_45825b15-9674-46b3-b29e-7d78c4de3274/nmstate-webhook/0.log" Nov 21 20:50:56 crc kubenswrapper[4701]: I1121 20:50:56.951596 4701 scope.go:117] "RemoveContainer" containerID="3d797f0000302550ad2e1909fd94b82c08f3845841d76d73d89e762960062c62" Nov 21 20:50:56 crc kubenswrapper[4701]: E1121 20:50:56.952076 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:51:10 crc kubenswrapper[4701]: I1121 20:51:10.951240 4701 scope.go:117] "RemoveContainer" containerID="3d797f0000302550ad2e1909fd94b82c08f3845841d76d73d89e762960062c62" Nov 21 20:51:10 crc kubenswrapper[4701]: E1121 20:51:10.952096 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" 
podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:51:11 crc kubenswrapper[4701]: I1121 20:51:11.997623 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-htbbv_f0e3f5ca-8ea5-40a2-b362-49c12a9c2f0c/kube-rbac-proxy/0.log" Nov 21 20:51:12 crc kubenswrapper[4701]: I1121 20:51:12.057772 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-htbbv_f0e3f5ca-8ea5-40a2-b362-49c12a9c2f0c/controller/0.log" Nov 21 20:51:12 crc kubenswrapper[4701]: I1121 20:51:12.253555 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9ssmb_5a5f9d84-176f-4592-b663-03adffd0073f/cp-frr-files/0.log" Nov 21 20:51:12 crc kubenswrapper[4701]: I1121 20:51:12.442611 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9ssmb_5a5f9d84-176f-4592-b663-03adffd0073f/cp-reloader/0.log" Nov 21 20:51:12 crc kubenswrapper[4701]: I1121 20:51:12.479535 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9ssmb_5a5f9d84-176f-4592-b663-03adffd0073f/cp-frr-files/0.log" Nov 21 20:51:12 crc kubenswrapper[4701]: I1121 20:51:12.479806 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9ssmb_5a5f9d84-176f-4592-b663-03adffd0073f/cp-reloader/0.log" Nov 21 20:51:12 crc kubenswrapper[4701]: I1121 20:51:12.517727 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9ssmb_5a5f9d84-176f-4592-b663-03adffd0073f/cp-metrics/0.log" Nov 21 20:51:12 crc kubenswrapper[4701]: I1121 20:51:12.747719 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9ssmb_5a5f9d84-176f-4592-b663-03adffd0073f/cp-reloader/0.log" Nov 21 20:51:12 crc kubenswrapper[4701]: I1121 20:51:12.786947 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9ssmb_5a5f9d84-176f-4592-b663-03adffd0073f/cp-metrics/0.log" Nov 21 20:51:12 crc kubenswrapper[4701]: I1121 20:51:12.806694 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9ssmb_5a5f9d84-176f-4592-b663-03adffd0073f/cp-metrics/0.log" Nov 21 20:51:12 crc kubenswrapper[4701]: I1121 20:51:12.820449 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9ssmb_5a5f9d84-176f-4592-b663-03adffd0073f/cp-frr-files/0.log" Nov 21 20:51:13 crc kubenswrapper[4701]: I1121 20:51:13.066052 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9ssmb_5a5f9d84-176f-4592-b663-03adffd0073f/cp-reloader/0.log" Nov 21 20:51:13 crc kubenswrapper[4701]: I1121 20:51:13.118796 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9ssmb_5a5f9d84-176f-4592-b663-03adffd0073f/controller/0.log" Nov 21 20:51:13 crc kubenswrapper[4701]: I1121 20:51:13.147498 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9ssmb_5a5f9d84-176f-4592-b663-03adffd0073f/cp-frr-files/0.log" Nov 21 20:51:13 crc kubenswrapper[4701]: I1121 20:51:13.215621 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9ssmb_5a5f9d84-176f-4592-b663-03adffd0073f/cp-metrics/0.log" Nov 21 20:51:13 crc kubenswrapper[4701]: I1121 20:51:13.357525 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9ssmb_5a5f9d84-176f-4592-b663-03adffd0073f/kube-rbac-proxy/0.log" Nov 21 20:51:13 crc kubenswrapper[4701]: I1121 20:51:13.473808 4701 log.go:25] 
"Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9ssmb_5a5f9d84-176f-4592-b663-03adffd0073f/kube-rbac-proxy-frr/0.log" Nov 21 20:51:13 crc kubenswrapper[4701]: I1121 20:51:13.513912 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9ssmb_5a5f9d84-176f-4592-b663-03adffd0073f/frr-metrics/0.log" Nov 21 20:51:13 crc kubenswrapper[4701]: I1121 20:51:13.641511 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9ssmb_5a5f9d84-176f-4592-b663-03adffd0073f/reloader/0.log" Nov 21 20:51:13 crc kubenswrapper[4701]: I1121 20:51:13.875392 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-4zbkd_dc0b171f-3d9f-41b9-914b-ca723de8416f/frr-k8s-webhook-server/0.log" Nov 21 20:51:14 crc kubenswrapper[4701]: I1121 20:51:14.339719 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-5b89f66749-2c7l9_1e13b4b2-e1e0-4f12-b8a4-e364e57407b1/manager/0.log" Nov 21 20:51:14 crc kubenswrapper[4701]: I1121 20:51:14.422891 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-7fffd4c557-7pqsr_394bdfbe-ca77-47ca-837c-68023e532b01/webhook-server/0.log" Nov 21 20:51:14 crc kubenswrapper[4701]: I1121 20:51:14.677703 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-hmzkz_fa17db1f-33dc-4f0e-a191-7a01d67c575d/kube-rbac-proxy/0.log" Nov 21 20:51:15 crc kubenswrapper[4701]: I1121 20:51:15.071689 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9ssmb_5a5f9d84-176f-4592-b663-03adffd0073f/frr/0.log" Nov 21 20:51:15 crc kubenswrapper[4701]: I1121 20:51:15.212136 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-hmzkz_fa17db1f-33dc-4f0e-a191-7a01d67c575d/speaker/0.log" Nov 21 20:51:21 crc kubenswrapper[4701]: I1121 20:51:21.951807 4701 scope.go:117] "RemoveContainer" containerID="3d797f0000302550ad2e1909fd94b82c08f3845841d76d73d89e762960062c62" Nov 21 20:51:21 crc kubenswrapper[4701]: E1121 20:51:21.953261 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:51:32 crc kubenswrapper[4701]: I1121 20:51:32.022243 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w_dbcf8276-4c26-4faa-85dc-abc66d2004a6/util/0.log" Nov 21 20:51:32 crc kubenswrapper[4701]: I1121 20:51:32.236139 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w_dbcf8276-4c26-4faa-85dc-abc66d2004a6/util/0.log" Nov 21 20:51:32 crc kubenswrapper[4701]: I1121 20:51:32.300552 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w_dbcf8276-4c26-4faa-85dc-abc66d2004a6/pull/0.log" Nov 21 20:51:32 crc kubenswrapper[4701]: I1121 20:51:32.328264 4701 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w_dbcf8276-4c26-4faa-85dc-abc66d2004a6/pull/0.log" Nov 21 20:51:32 crc kubenswrapper[4701]: I1121 20:51:32.437884 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w_dbcf8276-4c26-4faa-85dc-abc66d2004a6/util/0.log" Nov 21 20:51:32 crc kubenswrapper[4701]: I1121 20:51:32.468677 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w_dbcf8276-4c26-4faa-85dc-abc66d2004a6/pull/0.log" Nov 21 20:51:32 crc kubenswrapper[4701]: I1121 20:51:32.509223 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erqm7w_dbcf8276-4c26-4faa-85dc-abc66d2004a6/extract/0.log" Nov 21 20:51:32 crc kubenswrapper[4701]: I1121 20:51:32.659565 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl_57477ab7-1bf6-486a-ae7a-98cf3e893869/util/0.log" Nov 21 20:51:32 crc kubenswrapper[4701]: I1121 20:51:32.835936 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl_57477ab7-1bf6-486a-ae7a-98cf3e893869/util/0.log" Nov 21 20:51:32 crc kubenswrapper[4701]: I1121 20:51:32.860245 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl_57477ab7-1bf6-486a-ae7a-98cf3e893869/pull/0.log" Nov 21 20:51:32 crc kubenswrapper[4701]: I1121 20:51:32.880115 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl_57477ab7-1bf6-486a-ae7a-98cf3e893869/pull/0.log" Nov 21 20:51:32 crc kubenswrapper[4701]: I1121 20:51:32.951940 4701 scope.go:117] "RemoveContainer" containerID="3d797f0000302550ad2e1909fd94b82c08f3845841d76d73d89e762960062c62" Nov 21 20:51:32 crc kubenswrapper[4701]: E1121 20:51:32.952239 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:51:33 crc kubenswrapper[4701]: I1121 20:51:33.123578 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl_57477ab7-1bf6-486a-ae7a-98cf3e893869/util/0.log" Nov 21 20:51:33 crc kubenswrapper[4701]: I1121 20:51:33.156030 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl_57477ab7-1bf6-486a-ae7a-98cf3e893869/pull/0.log" Nov 21 20:51:33 crc kubenswrapper[4701]: I1121 20:51:33.157943 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rrlpl_57477ab7-1bf6-486a-ae7a-98cf3e893869/extract/0.log" Nov 21 20:51:33 crc kubenswrapper[4701]: I1121 20:51:33.321569 4701 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openshift-marketplace_certified-operators-v9dtr_08bf0aaf-b621-48f2-b2b1-c6939a9a3440/extract-utilities/0.log" Nov 21 20:51:33 crc kubenswrapper[4701]: I1121 20:51:33.494311 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-v9dtr_08bf0aaf-b621-48f2-b2b1-c6939a9a3440/extract-utilities/0.log" Nov 21 20:51:33 crc kubenswrapper[4701]: I1121 20:51:33.519663 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-v9dtr_08bf0aaf-b621-48f2-b2b1-c6939a9a3440/extract-content/0.log" Nov 21 20:51:33 crc kubenswrapper[4701]: I1121 20:51:33.520752 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-v9dtr_08bf0aaf-b621-48f2-b2b1-c6939a9a3440/extract-content/0.log" Nov 21 20:51:33 crc kubenswrapper[4701]: I1121 20:51:33.760682 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-v9dtr_08bf0aaf-b621-48f2-b2b1-c6939a9a3440/extract-content/0.log" Nov 21 20:51:33 crc kubenswrapper[4701]: I1121 20:51:33.769529 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-v9dtr_08bf0aaf-b621-48f2-b2b1-c6939a9a3440/extract-utilities/0.log" Nov 21 20:51:34 crc kubenswrapper[4701]: I1121 20:51:34.026809 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2r5kn_83bec736-5bd4-4889-b0fe-864eaa0fcb3a/extract-utilities/0.log" Nov 21 20:51:34 crc kubenswrapper[4701]: I1121 20:51:34.283358 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2r5kn_83bec736-5bd4-4889-b0fe-864eaa0fcb3a/extract-content/0.log" Nov 21 20:51:34 crc kubenswrapper[4701]: I1121 20:51:34.314885 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2r5kn_83bec736-5bd4-4889-b0fe-864eaa0fcb3a/extract-utilities/0.log" Nov 21 20:51:34 crc kubenswrapper[4701]: I1121 20:51:34.393085 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2r5kn_83bec736-5bd4-4889-b0fe-864eaa0fcb3a/extract-content/0.log" Nov 21 20:51:34 crc kubenswrapper[4701]: I1121 20:51:34.519985 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2r5kn_83bec736-5bd4-4889-b0fe-864eaa0fcb3a/extract-content/0.log" Nov 21 20:51:34 crc kubenswrapper[4701]: I1121 20:51:34.598305 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2r5kn_83bec736-5bd4-4889-b0fe-864eaa0fcb3a/extract-utilities/0.log" Nov 21 20:51:34 crc kubenswrapper[4701]: I1121 20:51:34.632411 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-v9dtr_08bf0aaf-b621-48f2-b2b1-c6939a9a3440/registry-server/0.log" Nov 21 20:51:34 crc kubenswrapper[4701]: I1121 20:51:34.864959 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x_504f2cb2-d553-4a6e-8a22-b3c111a55808/util/0.log" Nov 21 20:51:35 crc kubenswrapper[4701]: I1121 20:51:35.160140 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x_504f2cb2-d553-4a6e-8a22-b3c111a55808/util/0.log" Nov 21 20:51:35 crc kubenswrapper[4701]: I1121 20:51:35.224282 
4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x_504f2cb2-d553-4a6e-8a22-b3c111a55808/pull/0.log" Nov 21 20:51:35 crc kubenswrapper[4701]: I1121 20:51:35.259078 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x_504f2cb2-d553-4a6e-8a22-b3c111a55808/pull/0.log" Nov 21 20:51:35 crc kubenswrapper[4701]: I1121 20:51:35.507393 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x_504f2cb2-d553-4a6e-8a22-b3c111a55808/util/0.log" Nov 21 20:51:35 crc kubenswrapper[4701]: I1121 20:51:35.592923 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2r5kn_83bec736-5bd4-4889-b0fe-864eaa0fcb3a/registry-server/0.log" Nov 21 20:51:35 crc kubenswrapper[4701]: I1121 20:51:35.597077 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x_504f2cb2-d553-4a6e-8a22-b3c111a55808/pull/0.log" Nov 21 20:51:35 crc kubenswrapper[4701]: I1121 20:51:35.617152 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6dkr9x_504f2cb2-d553-4a6e-8a22-b3c111a55808/extract/0.log" Nov 21 20:51:35 crc kubenswrapper[4701]: I1121 20:51:35.786379 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-7grnb_be676bd8-0b5e-48b4-829b-021f132d3247/marketplace-operator/0.log" Nov 21 20:51:35 crc kubenswrapper[4701]: I1121 20:51:35.820468 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-ctfn6_72f39205-4f40-45be-99f5-0036f0da7491/extract-utilities/0.log" Nov 21 20:51:36 crc kubenswrapper[4701]: I1121 20:51:36.098459 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-ctfn6_72f39205-4f40-45be-99f5-0036f0da7491/extract-content/0.log" Nov 21 20:51:36 crc kubenswrapper[4701]: I1121 20:51:36.102144 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-ctfn6_72f39205-4f40-45be-99f5-0036f0da7491/extract-content/0.log" Nov 21 20:51:36 crc kubenswrapper[4701]: I1121 20:51:36.112163 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-ctfn6_72f39205-4f40-45be-99f5-0036f0da7491/extract-utilities/0.log" Nov 21 20:51:36 crc kubenswrapper[4701]: I1121 20:51:36.286887 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-ctfn6_72f39205-4f40-45be-99f5-0036f0da7491/extract-utilities/0.log" Nov 21 20:51:36 crc kubenswrapper[4701]: I1121 20:51:36.372690 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tbtgz_3e81175f-5aec-4176-b6ec-d4d292063f20/extract-utilities/0.log" Nov 21 20:51:36 crc kubenswrapper[4701]: I1121 20:51:36.427228 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-ctfn6_72f39205-4f40-45be-99f5-0036f0da7491/extract-content/0.log" Nov 21 20:51:36 crc kubenswrapper[4701]: I1121 20:51:36.540243 4701 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-marketplace-ctfn6_72f39205-4f40-45be-99f5-0036f0da7491/registry-server/0.log" Nov 21 20:51:36 crc kubenswrapper[4701]: I1121 20:51:36.693675 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tbtgz_3e81175f-5aec-4176-b6ec-d4d292063f20/extract-utilities/0.log" Nov 21 20:51:36 crc kubenswrapper[4701]: I1121 20:51:36.699836 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tbtgz_3e81175f-5aec-4176-b6ec-d4d292063f20/extract-content/0.log" Nov 21 20:51:36 crc kubenswrapper[4701]: I1121 20:51:36.706791 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tbtgz_3e81175f-5aec-4176-b6ec-d4d292063f20/extract-content/0.log" Nov 21 20:51:36 crc kubenswrapper[4701]: I1121 20:51:36.867026 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tbtgz_3e81175f-5aec-4176-b6ec-d4d292063f20/extract-content/0.log" Nov 21 20:51:36 crc kubenswrapper[4701]: I1121 20:51:36.886586 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tbtgz_3e81175f-5aec-4176-b6ec-d4d292063f20/extract-utilities/0.log" Nov 21 20:51:37 crc kubenswrapper[4701]: I1121 20:51:37.797642 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tbtgz_3e81175f-5aec-4176-b6ec-d4d292063f20/registry-server/0.log" Nov 21 20:51:47 crc kubenswrapper[4701]: I1121 20:51:47.952297 4701 scope.go:117] "RemoveContainer" containerID="3d797f0000302550ad2e1909fd94b82c08f3845841d76d73d89e762960062c62" Nov 21 20:51:47 crc kubenswrapper[4701]: E1121 20:51:47.953258 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:51:52 crc kubenswrapper[4701]: I1121 20:51:52.770825 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-668cf9dfbb-2rtjt_de3cafdb-cdad-4b38-a867-fd0e88551dc7/prometheus-operator/0.log" Nov 21 20:51:52 crc kubenswrapper[4701]: I1121 20:51:52.899811 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-7f5ddff8fb-fjw5g_91073027-6c2c-4cbf-af6d-bd763b073a0b/prometheus-operator-admission-webhook/0.log" Nov 21 20:51:52 crc kubenswrapper[4701]: I1121 20:51:52.962782 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-7f5ddff8fb-rv4tn_11625333-ca33-46bb-9856-a0390b6283bf/prometheus-operator-admission-webhook/0.log" Nov 21 20:51:53 crc kubenswrapper[4701]: I1121 20:51:53.079639 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-d8bb48f5d-b65kf_c4bfc6b7-63e8-4ab2-a9f6-369332e97f12/operator/0.log" Nov 21 20:51:53 crc kubenswrapper[4701]: I1121 20:51:53.226365 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5446b9c989-bczvw_4adf7511-ec5a-47a4-9e69-c8650f1bc017/perses-operator/0.log" Nov 21 20:52:02 crc kubenswrapper[4701]: I1121 
20:52:02.951849 4701 scope.go:117] "RemoveContainer" containerID="3d797f0000302550ad2e1909fd94b82c08f3845841d76d73d89e762960062c62" Nov 21 20:52:02 crc kubenswrapper[4701]: E1121 20:52:02.952907 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:52:15 crc kubenswrapper[4701]: I1121 20:52:15.965550 4701 scope.go:117] "RemoveContainer" containerID="3d797f0000302550ad2e1909fd94b82c08f3845841d76d73d89e762960062c62" Nov 21 20:52:15 crc kubenswrapper[4701]: E1121 20:52:15.967012 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:52:27 crc kubenswrapper[4701]: I1121 20:52:27.952115 4701 scope.go:117] "RemoveContainer" containerID="3d797f0000302550ad2e1909fd94b82c08f3845841d76d73d89e762960062c62" Nov 21 20:52:27 crc kubenswrapper[4701]: E1121 20:52:27.953297 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:52:40 crc kubenswrapper[4701]: I1121 20:52:40.951882 4701 scope.go:117] "RemoveContainer" containerID="3d797f0000302550ad2e1909fd94b82c08f3845841d76d73d89e762960062c62" Nov 21 20:52:40 crc kubenswrapper[4701]: E1121 20:52:40.953692 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:52:55 crc kubenswrapper[4701]: I1121 20:52:55.952328 4701 scope.go:117] "RemoveContainer" containerID="3d797f0000302550ad2e1909fd94b82c08f3845841d76d73d89e762960062c62" Nov 21 20:52:55 crc kubenswrapper[4701]: E1121 20:52:55.953774 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:53:03 crc kubenswrapper[4701]: I1121 20:53:03.119361 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-nflgq"] Nov 21 20:53:03 crc kubenswrapper[4701]: E1121 20:53:03.120692 4701 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a02b4582-b625-4515-af6b-bef20d78d5db" containerName="container-00" Nov 21 20:53:03 crc kubenswrapper[4701]: I1121 20:53:03.120710 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="a02b4582-b625-4515-af6b-bef20d78d5db" containerName="container-00" Nov 21 20:53:03 crc kubenswrapper[4701]: I1121 20:53:03.120998 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="a02b4582-b625-4515-af6b-bef20d78d5db" containerName="container-00" Nov 21 20:53:03 crc kubenswrapper[4701]: I1121 20:53:03.128876 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nflgq" Nov 21 20:53:03 crc kubenswrapper[4701]: I1121 20:53:03.136000 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nflgq"] Nov 21 20:53:03 crc kubenswrapper[4701]: I1121 20:53:03.260752 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-57m7l\" (UniqueName: \"kubernetes.io/projected/d8134667-5d90-4a16-93de-27396a2d515f-kube-api-access-57m7l\") pod \"redhat-operators-nflgq\" (UID: \"d8134667-5d90-4a16-93de-27396a2d515f\") " pod="openshift-marketplace/redhat-operators-nflgq" Nov 21 20:53:03 crc kubenswrapper[4701]: I1121 20:53:03.260864 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8134667-5d90-4a16-93de-27396a2d515f-utilities\") pod \"redhat-operators-nflgq\" (UID: \"d8134667-5d90-4a16-93de-27396a2d515f\") " pod="openshift-marketplace/redhat-operators-nflgq" Nov 21 20:53:03 crc kubenswrapper[4701]: I1121 20:53:03.260890 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8134667-5d90-4a16-93de-27396a2d515f-catalog-content\") pod \"redhat-operators-nflgq\" (UID: \"d8134667-5d90-4a16-93de-27396a2d515f\") " pod="openshift-marketplace/redhat-operators-nflgq" Nov 21 20:53:03 crc kubenswrapper[4701]: I1121 20:53:03.365163 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8134667-5d90-4a16-93de-27396a2d515f-utilities\") pod \"redhat-operators-nflgq\" (UID: \"d8134667-5d90-4a16-93de-27396a2d515f\") " pod="openshift-marketplace/redhat-operators-nflgq" Nov 21 20:53:03 crc kubenswrapper[4701]: I1121 20:53:03.365234 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8134667-5d90-4a16-93de-27396a2d515f-catalog-content\") pod \"redhat-operators-nflgq\" (UID: \"d8134667-5d90-4a16-93de-27396a2d515f\") " pod="openshift-marketplace/redhat-operators-nflgq" Nov 21 20:53:03 crc kubenswrapper[4701]: I1121 20:53:03.365388 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-57m7l\" (UniqueName: \"kubernetes.io/projected/d8134667-5d90-4a16-93de-27396a2d515f-kube-api-access-57m7l\") pod \"redhat-operators-nflgq\" (UID: \"d8134667-5d90-4a16-93de-27396a2d515f\") " pod="openshift-marketplace/redhat-operators-nflgq" Nov 21 20:53:03 crc kubenswrapper[4701]: I1121 20:53:03.366022 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8134667-5d90-4a16-93de-27396a2d515f-catalog-content\") pod 
\"redhat-operators-nflgq\" (UID: \"d8134667-5d90-4a16-93de-27396a2d515f\") " pod="openshift-marketplace/redhat-operators-nflgq" Nov 21 20:53:03 crc kubenswrapper[4701]: I1121 20:53:03.366307 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8134667-5d90-4a16-93de-27396a2d515f-utilities\") pod \"redhat-operators-nflgq\" (UID: \"d8134667-5d90-4a16-93de-27396a2d515f\") " pod="openshift-marketplace/redhat-operators-nflgq" Nov 21 20:53:03 crc kubenswrapper[4701]: I1121 20:53:03.393625 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-57m7l\" (UniqueName: \"kubernetes.io/projected/d8134667-5d90-4a16-93de-27396a2d515f-kube-api-access-57m7l\") pod \"redhat-operators-nflgq\" (UID: \"d8134667-5d90-4a16-93de-27396a2d515f\") " pod="openshift-marketplace/redhat-operators-nflgq" Nov 21 20:53:03 crc kubenswrapper[4701]: I1121 20:53:03.461458 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nflgq" Nov 21 20:53:03 crc kubenswrapper[4701]: I1121 20:53:03.975599 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nflgq"] Nov 21 20:53:04 crc kubenswrapper[4701]: I1121 20:53:04.259070 4701 generic.go:334] "Generic (PLEG): container finished" podID="d8134667-5d90-4a16-93de-27396a2d515f" containerID="8a98bb7315d6d70f11ac3aa31ad6953fc99c1a6ce2b1fd24efb6cc65079970b9" exitCode=0 Nov 21 20:53:04 crc kubenswrapper[4701]: I1121 20:53:04.259121 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nflgq" event={"ID":"d8134667-5d90-4a16-93de-27396a2d515f","Type":"ContainerDied","Data":"8a98bb7315d6d70f11ac3aa31ad6953fc99c1a6ce2b1fd24efb6cc65079970b9"} Nov 21 20:53:04 crc kubenswrapper[4701]: I1121 20:53:04.259147 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nflgq" event={"ID":"d8134667-5d90-4a16-93de-27396a2d515f","Type":"ContainerStarted","Data":"3340ed985ad7a17113bd5162841369e3b4565102a66a2396e05a6e3f07ca0702"} Nov 21 20:53:04 crc kubenswrapper[4701]: I1121 20:53:04.261576 4701 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 20:53:05 crc kubenswrapper[4701]: I1121 20:53:05.274900 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nflgq" event={"ID":"d8134667-5d90-4a16-93de-27396a2d515f","Type":"ContainerStarted","Data":"196f8840390555ac1450a06821452c67262b39e6171f54fc6876910d6103d8f7"} Nov 21 20:53:09 crc kubenswrapper[4701]: I1121 20:53:09.332060 4701 generic.go:334] "Generic (PLEG): container finished" podID="d8134667-5d90-4a16-93de-27396a2d515f" containerID="196f8840390555ac1450a06821452c67262b39e6171f54fc6876910d6103d8f7" exitCode=0 Nov 21 20:53:09 crc kubenswrapper[4701]: I1121 20:53:09.332159 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nflgq" event={"ID":"d8134667-5d90-4a16-93de-27396a2d515f","Type":"ContainerDied","Data":"196f8840390555ac1450a06821452c67262b39e6171f54fc6876910d6103d8f7"} Nov 21 20:53:09 crc kubenswrapper[4701]: I1121 20:53:09.966174 4701 scope.go:117] "RemoveContainer" containerID="3d797f0000302550ad2e1909fd94b82c08f3845841d76d73d89e762960062c62" Nov 21 20:53:09 crc kubenswrapper[4701]: E1121 20:53:09.966582 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:53:10 crc kubenswrapper[4701]: I1121 20:53:10.346846 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nflgq" event={"ID":"d8134667-5d90-4a16-93de-27396a2d515f","Type":"ContainerStarted","Data":"a038b1643523266adb533a283867bb68e5392cfef9132c70fb1b6a0c5733e417"} Nov 21 20:53:10 crc kubenswrapper[4701]: I1121 20:53:10.368119 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-nflgq" podStartSLOduration=1.804372335 podStartE2EDuration="7.36808114s" podCreationTimestamp="2025-11-21 20:53:03 +0000 UTC" firstStartedPulling="2025-11-21 20:53:04.261344237 +0000 UTC m=+6675.046484274" lastFinishedPulling="2025-11-21 20:53:09.825053022 +0000 UTC m=+6680.610193079" observedRunningTime="2025-11-21 20:53:10.368006498 +0000 UTC m=+6681.153146535" watchObservedRunningTime="2025-11-21 20:53:10.36808114 +0000 UTC m=+6681.153221197" Nov 21 20:53:12 crc kubenswrapper[4701]: I1121 20:53:12.918692 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-z44z6"] Nov 21 20:53:12 crc kubenswrapper[4701]: I1121 20:53:12.922288 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z44z6" Nov 21 20:53:12 crc kubenswrapper[4701]: I1121 20:53:12.937095 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-z44z6"] Nov 21 20:53:13 crc kubenswrapper[4701]: I1121 20:53:13.044343 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ebebb593-de66-4aa7-9c9a-cad4250dbc65-catalog-content\") pod \"redhat-marketplace-z44z6\" (UID: \"ebebb593-de66-4aa7-9c9a-cad4250dbc65\") " pod="openshift-marketplace/redhat-marketplace-z44z6" Nov 21 20:53:13 crc kubenswrapper[4701]: I1121 20:53:13.044882 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bsz7g\" (UniqueName: \"kubernetes.io/projected/ebebb593-de66-4aa7-9c9a-cad4250dbc65-kube-api-access-bsz7g\") pod \"redhat-marketplace-z44z6\" (UID: \"ebebb593-de66-4aa7-9c9a-cad4250dbc65\") " pod="openshift-marketplace/redhat-marketplace-z44z6" Nov 21 20:53:13 crc kubenswrapper[4701]: I1121 20:53:13.044951 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ebebb593-de66-4aa7-9c9a-cad4250dbc65-utilities\") pod \"redhat-marketplace-z44z6\" (UID: \"ebebb593-de66-4aa7-9c9a-cad4250dbc65\") " pod="openshift-marketplace/redhat-marketplace-z44z6" Nov 21 20:53:13 crc kubenswrapper[4701]: I1121 20:53:13.148034 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bsz7g\" (UniqueName: \"kubernetes.io/projected/ebebb593-de66-4aa7-9c9a-cad4250dbc65-kube-api-access-bsz7g\") pod \"redhat-marketplace-z44z6\" (UID: \"ebebb593-de66-4aa7-9c9a-cad4250dbc65\") " pod="openshift-marketplace/redhat-marketplace-z44z6" Nov 21 20:53:13 crc kubenswrapper[4701]: I1121 20:53:13.148854 4701 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ebebb593-de66-4aa7-9c9a-cad4250dbc65-utilities\") pod \"redhat-marketplace-z44z6\" (UID: \"ebebb593-de66-4aa7-9c9a-cad4250dbc65\") " pod="openshift-marketplace/redhat-marketplace-z44z6" Nov 21 20:53:13 crc kubenswrapper[4701]: I1121 20:53:13.149556 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ebebb593-de66-4aa7-9c9a-cad4250dbc65-catalog-content\") pod \"redhat-marketplace-z44z6\" (UID: \"ebebb593-de66-4aa7-9c9a-cad4250dbc65\") " pod="openshift-marketplace/redhat-marketplace-z44z6" Nov 21 20:53:13 crc kubenswrapper[4701]: I1121 20:53:13.150109 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ebebb593-de66-4aa7-9c9a-cad4250dbc65-catalog-content\") pod \"redhat-marketplace-z44z6\" (UID: \"ebebb593-de66-4aa7-9c9a-cad4250dbc65\") " pod="openshift-marketplace/redhat-marketplace-z44z6" Nov 21 20:53:13 crc kubenswrapper[4701]: I1121 20:53:13.149440 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ebebb593-de66-4aa7-9c9a-cad4250dbc65-utilities\") pod \"redhat-marketplace-z44z6\" (UID: \"ebebb593-de66-4aa7-9c9a-cad4250dbc65\") " pod="openshift-marketplace/redhat-marketplace-z44z6" Nov 21 20:53:13 crc kubenswrapper[4701]: I1121 20:53:13.195535 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bsz7g\" (UniqueName: \"kubernetes.io/projected/ebebb593-de66-4aa7-9c9a-cad4250dbc65-kube-api-access-bsz7g\") pod \"redhat-marketplace-z44z6\" (UID: \"ebebb593-de66-4aa7-9c9a-cad4250dbc65\") " pod="openshift-marketplace/redhat-marketplace-z44z6" Nov 21 20:53:13 crc kubenswrapper[4701]: I1121 20:53:13.252491 4701 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z44z6" Nov 21 20:53:13 crc kubenswrapper[4701]: I1121 20:53:13.463891 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-nflgq" Nov 21 20:53:13 crc kubenswrapper[4701]: I1121 20:53:13.464404 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-nflgq" Nov 21 20:53:13 crc kubenswrapper[4701]: I1121 20:53:13.924416 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-z44z6"] Nov 21 20:53:14 crc kubenswrapper[4701]: I1121 20:53:14.401580 4701 generic.go:334] "Generic (PLEG): container finished" podID="ebebb593-de66-4aa7-9c9a-cad4250dbc65" containerID="72c783963f6ea84951986cf024b25d11f2df2d09b6f7f8cbd957fd4e43538162" exitCode=0 Nov 21 20:53:14 crc kubenswrapper[4701]: I1121 20:53:14.401741 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z44z6" event={"ID":"ebebb593-de66-4aa7-9c9a-cad4250dbc65","Type":"ContainerDied","Data":"72c783963f6ea84951986cf024b25d11f2df2d09b6f7f8cbd957fd4e43538162"} Nov 21 20:53:14 crc kubenswrapper[4701]: I1121 20:53:14.401975 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z44z6" event={"ID":"ebebb593-de66-4aa7-9c9a-cad4250dbc65","Type":"ContainerStarted","Data":"3c64906a2371e882a6dc4b4b51f38bce83b920aba9386f90d834dbfe4aa9a917"} Nov 21 20:53:14 crc kubenswrapper[4701]: I1121 20:53:14.522433 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-nflgq" podUID="d8134667-5d90-4a16-93de-27396a2d515f" containerName="registry-server" probeResult="failure" output=< Nov 21 20:53:14 crc kubenswrapper[4701]: timeout: failed to connect service ":50051" within 1s Nov 21 20:53:14 crc kubenswrapper[4701]: > Nov 21 20:53:15 crc kubenswrapper[4701]: I1121 20:53:15.415982 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z44z6" event={"ID":"ebebb593-de66-4aa7-9c9a-cad4250dbc65","Type":"ContainerStarted","Data":"eabbdcf00ca93e774cb2312e0c3f29e7004d365f8ee2b4f786b7ee892ff81474"} Nov 21 20:53:16 crc kubenswrapper[4701]: I1121 20:53:16.427916 4701 generic.go:334] "Generic (PLEG): container finished" podID="ebebb593-de66-4aa7-9c9a-cad4250dbc65" containerID="eabbdcf00ca93e774cb2312e0c3f29e7004d365f8ee2b4f786b7ee892ff81474" exitCode=0 Nov 21 20:53:16 crc kubenswrapper[4701]: I1121 20:53:16.427965 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z44z6" event={"ID":"ebebb593-de66-4aa7-9c9a-cad4250dbc65","Type":"ContainerDied","Data":"eabbdcf00ca93e774cb2312e0c3f29e7004d365f8ee2b4f786b7ee892ff81474"} Nov 21 20:53:17 crc kubenswrapper[4701]: I1121 20:53:17.444595 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z44z6" event={"ID":"ebebb593-de66-4aa7-9c9a-cad4250dbc65","Type":"ContainerStarted","Data":"fdc8f26e9d818a5d00851cf9cfba5335bc9486c1c996101ab446c5d06801b072"} Nov 21 20:53:17 crc kubenswrapper[4701]: I1121 20:53:17.473167 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-z44z6" podStartSLOduration=3.030690066 podStartE2EDuration="5.473146061s" podCreationTimestamp="2025-11-21 20:53:12 +0000 UTC" firstStartedPulling="2025-11-21 20:53:14.404977336 +0000 UTC m=+6685.190117353" 
lastFinishedPulling="2025-11-21 20:53:16.847433321 +0000 UTC m=+6687.632573348" observedRunningTime="2025-11-21 20:53:17.466352811 +0000 UTC m=+6688.251492848" watchObservedRunningTime="2025-11-21 20:53:17.473146061 +0000 UTC m=+6688.258286098" Nov 21 20:53:17 crc kubenswrapper[4701]: I1121 20:53:17.905939 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-4pc2l"] Nov 21 20:53:17 crc kubenswrapper[4701]: I1121 20:53:17.910349 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4pc2l" Nov 21 20:53:17 crc kubenswrapper[4701]: I1121 20:53:17.932236 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4pc2l"] Nov 21 20:53:17 crc kubenswrapper[4701]: I1121 20:53:17.997092 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c6pf5\" (UniqueName: \"kubernetes.io/projected/483bf6a5-c70e-42ab-b641-c7dcad59f123-kube-api-access-c6pf5\") pod \"certified-operators-4pc2l\" (UID: \"483bf6a5-c70e-42ab-b641-c7dcad59f123\") " pod="openshift-marketplace/certified-operators-4pc2l" Nov 21 20:53:17 crc kubenswrapper[4701]: I1121 20:53:17.997268 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/483bf6a5-c70e-42ab-b641-c7dcad59f123-catalog-content\") pod \"certified-operators-4pc2l\" (UID: \"483bf6a5-c70e-42ab-b641-c7dcad59f123\") " pod="openshift-marketplace/certified-operators-4pc2l" Nov 21 20:53:17 crc kubenswrapper[4701]: I1121 20:53:17.997325 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/483bf6a5-c70e-42ab-b641-c7dcad59f123-utilities\") pod \"certified-operators-4pc2l\" (UID: \"483bf6a5-c70e-42ab-b641-c7dcad59f123\") " pod="openshift-marketplace/certified-operators-4pc2l" Nov 21 20:53:18 crc kubenswrapper[4701]: I1121 20:53:18.100538 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c6pf5\" (UniqueName: \"kubernetes.io/projected/483bf6a5-c70e-42ab-b641-c7dcad59f123-kube-api-access-c6pf5\") pod \"certified-operators-4pc2l\" (UID: \"483bf6a5-c70e-42ab-b641-c7dcad59f123\") " pod="openshift-marketplace/certified-operators-4pc2l" Nov 21 20:53:18 crc kubenswrapper[4701]: I1121 20:53:18.100607 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/483bf6a5-c70e-42ab-b641-c7dcad59f123-catalog-content\") pod \"certified-operators-4pc2l\" (UID: \"483bf6a5-c70e-42ab-b641-c7dcad59f123\") " pod="openshift-marketplace/certified-operators-4pc2l" Nov 21 20:53:18 crc kubenswrapper[4701]: I1121 20:53:18.100629 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/483bf6a5-c70e-42ab-b641-c7dcad59f123-utilities\") pod \"certified-operators-4pc2l\" (UID: \"483bf6a5-c70e-42ab-b641-c7dcad59f123\") " pod="openshift-marketplace/certified-operators-4pc2l" Nov 21 20:53:18 crc kubenswrapper[4701]: I1121 20:53:18.101392 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/483bf6a5-c70e-42ab-b641-c7dcad59f123-utilities\") pod \"certified-operators-4pc2l\" (UID: \"483bf6a5-c70e-42ab-b641-c7dcad59f123\") " 
pod="openshift-marketplace/certified-operators-4pc2l" Nov 21 20:53:18 crc kubenswrapper[4701]: I1121 20:53:18.101407 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/483bf6a5-c70e-42ab-b641-c7dcad59f123-catalog-content\") pod \"certified-operators-4pc2l\" (UID: \"483bf6a5-c70e-42ab-b641-c7dcad59f123\") " pod="openshift-marketplace/certified-operators-4pc2l" Nov 21 20:53:18 crc kubenswrapper[4701]: I1121 20:53:18.122385 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c6pf5\" (UniqueName: \"kubernetes.io/projected/483bf6a5-c70e-42ab-b641-c7dcad59f123-kube-api-access-c6pf5\") pod \"certified-operators-4pc2l\" (UID: \"483bf6a5-c70e-42ab-b641-c7dcad59f123\") " pod="openshift-marketplace/certified-operators-4pc2l" Nov 21 20:53:18 crc kubenswrapper[4701]: I1121 20:53:18.239018 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4pc2l" Nov 21 20:53:18 crc kubenswrapper[4701]: I1121 20:53:18.801347 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4pc2l"] Nov 21 20:53:19 crc kubenswrapper[4701]: I1121 20:53:19.499365 4701 generic.go:334] "Generic (PLEG): container finished" podID="483bf6a5-c70e-42ab-b641-c7dcad59f123" containerID="2c69c46ba96d26f5da4de40e3aba1689c2cd76f410f15dbed014ed7383a8f4c9" exitCode=0 Nov 21 20:53:19 crc kubenswrapper[4701]: I1121 20:53:19.499427 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4pc2l" event={"ID":"483bf6a5-c70e-42ab-b641-c7dcad59f123","Type":"ContainerDied","Data":"2c69c46ba96d26f5da4de40e3aba1689c2cd76f410f15dbed014ed7383a8f4c9"} Nov 21 20:53:19 crc kubenswrapper[4701]: I1121 20:53:19.499823 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4pc2l" event={"ID":"483bf6a5-c70e-42ab-b641-c7dcad59f123","Type":"ContainerStarted","Data":"ed076a1fabf9c6f6d00ed1a5a035c5aafbf14edd4ec4fa4ff0aaa7e62ab9bb79"} Nov 21 20:53:20 crc kubenswrapper[4701]: I1121 20:53:20.516844 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4pc2l" event={"ID":"483bf6a5-c70e-42ab-b641-c7dcad59f123","Type":"ContainerStarted","Data":"38345219dc104a722eb7480bdd437dff8a105231f7e9e318fb784aafde3c3851"} Nov 21 20:53:22 crc kubenswrapper[4701]: I1121 20:53:22.541836 4701 generic.go:334] "Generic (PLEG): container finished" podID="483bf6a5-c70e-42ab-b641-c7dcad59f123" containerID="38345219dc104a722eb7480bdd437dff8a105231f7e9e318fb784aafde3c3851" exitCode=0 Nov 21 20:53:22 crc kubenswrapper[4701]: I1121 20:53:22.542254 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4pc2l" event={"ID":"483bf6a5-c70e-42ab-b641-c7dcad59f123","Type":"ContainerDied","Data":"38345219dc104a722eb7480bdd437dff8a105231f7e9e318fb784aafde3c3851"} Nov 21 20:53:23 crc kubenswrapper[4701]: I1121 20:53:23.254269 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-z44z6" Nov 21 20:53:23 crc kubenswrapper[4701]: I1121 20:53:23.254722 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-z44z6" Nov 21 20:53:23 crc kubenswrapper[4701]: I1121 20:53:23.313876 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/redhat-marketplace-z44z6" Nov 21 20:53:23 crc kubenswrapper[4701]: I1121 20:53:23.564236 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4pc2l" event={"ID":"483bf6a5-c70e-42ab-b641-c7dcad59f123","Type":"ContainerStarted","Data":"e74dcd1348c6f79f77314330bf366f6f209155742706f267e5afe4ab7deceee1"} Nov 21 20:53:23 crc kubenswrapper[4701]: I1121 20:53:23.597417 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-4pc2l" podStartSLOduration=3.1503838220000002 podStartE2EDuration="6.597395131s" podCreationTimestamp="2025-11-21 20:53:17 +0000 UTC" firstStartedPulling="2025-11-21 20:53:19.501671788 +0000 UTC m=+6690.286811855" lastFinishedPulling="2025-11-21 20:53:22.948683137 +0000 UTC m=+6693.733823164" observedRunningTime="2025-11-21 20:53:23.580799459 +0000 UTC m=+6694.365939476" watchObservedRunningTime="2025-11-21 20:53:23.597395131 +0000 UTC m=+6694.382535158" Nov 21 20:53:23 crc kubenswrapper[4701]: I1121 20:53:23.649944 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-z44z6" Nov 21 20:53:24 crc kubenswrapper[4701]: I1121 20:53:24.523346 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-nflgq" podUID="d8134667-5d90-4a16-93de-27396a2d515f" containerName="registry-server" probeResult="failure" output=< Nov 21 20:53:24 crc kubenswrapper[4701]: timeout: failed to connect service ":50051" within 1s Nov 21 20:53:24 crc kubenswrapper[4701]: > Nov 21 20:53:24 crc kubenswrapper[4701]: I1121 20:53:24.951466 4701 scope.go:117] "RemoveContainer" containerID="3d797f0000302550ad2e1909fd94b82c08f3845841d76d73d89e762960062c62" Nov 21 20:53:24 crc kubenswrapper[4701]: E1121 20:53:24.952284 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:53:25 crc kubenswrapper[4701]: I1121 20:53:25.290275 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-z44z6"] Nov 21 20:53:25 crc kubenswrapper[4701]: I1121 20:53:25.583813 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-z44z6" podUID="ebebb593-de66-4aa7-9c9a-cad4250dbc65" containerName="registry-server" containerID="cri-o://fdc8f26e9d818a5d00851cf9cfba5335bc9486c1c996101ab446c5d06801b072" gracePeriod=2 Nov 21 20:53:26 crc kubenswrapper[4701]: I1121 20:53:26.125517 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z44z6" Nov 21 20:53:26 crc kubenswrapper[4701]: I1121 20:53:26.275358 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ebebb593-de66-4aa7-9c9a-cad4250dbc65-utilities\") pod \"ebebb593-de66-4aa7-9c9a-cad4250dbc65\" (UID: \"ebebb593-de66-4aa7-9c9a-cad4250dbc65\") " Nov 21 20:53:26 crc kubenswrapper[4701]: I1121 20:53:26.275420 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bsz7g\" (UniqueName: \"kubernetes.io/projected/ebebb593-de66-4aa7-9c9a-cad4250dbc65-kube-api-access-bsz7g\") pod \"ebebb593-de66-4aa7-9c9a-cad4250dbc65\" (UID: \"ebebb593-de66-4aa7-9c9a-cad4250dbc65\") " Nov 21 20:53:26 crc kubenswrapper[4701]: I1121 20:53:26.275451 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ebebb593-de66-4aa7-9c9a-cad4250dbc65-catalog-content\") pod \"ebebb593-de66-4aa7-9c9a-cad4250dbc65\" (UID: \"ebebb593-de66-4aa7-9c9a-cad4250dbc65\") " Nov 21 20:53:26 crc kubenswrapper[4701]: I1121 20:53:26.276403 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ebebb593-de66-4aa7-9c9a-cad4250dbc65-utilities" (OuterVolumeSpecName: "utilities") pod "ebebb593-de66-4aa7-9c9a-cad4250dbc65" (UID: "ebebb593-de66-4aa7-9c9a-cad4250dbc65"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:53:26 crc kubenswrapper[4701]: I1121 20:53:26.277295 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ebebb593-de66-4aa7-9c9a-cad4250dbc65-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 20:53:26 crc kubenswrapper[4701]: I1121 20:53:26.286457 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ebebb593-de66-4aa7-9c9a-cad4250dbc65-kube-api-access-bsz7g" (OuterVolumeSpecName: "kube-api-access-bsz7g") pod "ebebb593-de66-4aa7-9c9a-cad4250dbc65" (UID: "ebebb593-de66-4aa7-9c9a-cad4250dbc65"). InnerVolumeSpecName "kube-api-access-bsz7g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 20:53:26 crc kubenswrapper[4701]: I1121 20:53:26.302554 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ebebb593-de66-4aa7-9c9a-cad4250dbc65-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ebebb593-de66-4aa7-9c9a-cad4250dbc65" (UID: "ebebb593-de66-4aa7-9c9a-cad4250dbc65"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:53:26 crc kubenswrapper[4701]: I1121 20:53:26.379475 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bsz7g\" (UniqueName: \"kubernetes.io/projected/ebebb593-de66-4aa7-9c9a-cad4250dbc65-kube-api-access-bsz7g\") on node \"crc\" DevicePath \"\"" Nov 21 20:53:26 crc kubenswrapper[4701]: I1121 20:53:26.379511 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ebebb593-de66-4aa7-9c9a-cad4250dbc65-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 20:53:26 crc kubenswrapper[4701]: I1121 20:53:26.599075 4701 generic.go:334] "Generic (PLEG): container finished" podID="ebebb593-de66-4aa7-9c9a-cad4250dbc65" containerID="fdc8f26e9d818a5d00851cf9cfba5335bc9486c1c996101ab446c5d06801b072" exitCode=0 Nov 21 20:53:26 crc kubenswrapper[4701]: I1121 20:53:26.599119 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z44z6" event={"ID":"ebebb593-de66-4aa7-9c9a-cad4250dbc65","Type":"ContainerDied","Data":"fdc8f26e9d818a5d00851cf9cfba5335bc9486c1c996101ab446c5d06801b072"} Nov 21 20:53:26 crc kubenswrapper[4701]: I1121 20:53:26.599143 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z44z6" event={"ID":"ebebb593-de66-4aa7-9c9a-cad4250dbc65","Type":"ContainerDied","Data":"3c64906a2371e882a6dc4b4b51f38bce83b920aba9386f90d834dbfe4aa9a917"} Nov 21 20:53:26 crc kubenswrapper[4701]: I1121 20:53:26.599159 4701 scope.go:117] "RemoveContainer" containerID="fdc8f26e9d818a5d00851cf9cfba5335bc9486c1c996101ab446c5d06801b072" Nov 21 20:53:26 crc kubenswrapper[4701]: I1121 20:53:26.599279 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z44z6" Nov 21 20:53:26 crc kubenswrapper[4701]: I1121 20:53:26.626081 4701 scope.go:117] "RemoveContainer" containerID="eabbdcf00ca93e774cb2312e0c3f29e7004d365f8ee2b4f786b7ee892ff81474" Nov 21 20:53:26 crc kubenswrapper[4701]: I1121 20:53:26.649014 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-z44z6"] Nov 21 20:53:26 crc kubenswrapper[4701]: I1121 20:53:26.658181 4701 scope.go:117] "RemoveContainer" containerID="72c783963f6ea84951986cf024b25d11f2df2d09b6f7f8cbd957fd4e43538162" Nov 21 20:53:26 crc kubenswrapper[4701]: I1121 20:53:26.661211 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-z44z6"] Nov 21 20:53:26 crc kubenswrapper[4701]: I1121 20:53:26.711491 4701 scope.go:117] "RemoveContainer" containerID="fdc8f26e9d818a5d00851cf9cfba5335bc9486c1c996101ab446c5d06801b072" Nov 21 20:53:26 crc kubenswrapper[4701]: E1121 20:53:26.712038 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fdc8f26e9d818a5d00851cf9cfba5335bc9486c1c996101ab446c5d06801b072\": container with ID starting with fdc8f26e9d818a5d00851cf9cfba5335bc9486c1c996101ab446c5d06801b072 not found: ID does not exist" containerID="fdc8f26e9d818a5d00851cf9cfba5335bc9486c1c996101ab446c5d06801b072" Nov 21 20:53:26 crc kubenswrapper[4701]: I1121 20:53:26.712079 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fdc8f26e9d818a5d00851cf9cfba5335bc9486c1c996101ab446c5d06801b072"} err="failed to get container status \"fdc8f26e9d818a5d00851cf9cfba5335bc9486c1c996101ab446c5d06801b072\": rpc error: code = NotFound desc = could not find container \"fdc8f26e9d818a5d00851cf9cfba5335bc9486c1c996101ab446c5d06801b072\": container with ID starting with fdc8f26e9d818a5d00851cf9cfba5335bc9486c1c996101ab446c5d06801b072 not found: ID does not exist" Nov 21 20:53:26 crc kubenswrapper[4701]: I1121 20:53:26.712111 4701 scope.go:117] "RemoveContainer" containerID="eabbdcf00ca93e774cb2312e0c3f29e7004d365f8ee2b4f786b7ee892ff81474" Nov 21 20:53:26 crc kubenswrapper[4701]: E1121 20:53:26.712549 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eabbdcf00ca93e774cb2312e0c3f29e7004d365f8ee2b4f786b7ee892ff81474\": container with ID starting with eabbdcf00ca93e774cb2312e0c3f29e7004d365f8ee2b4f786b7ee892ff81474 not found: ID does not exist" containerID="eabbdcf00ca93e774cb2312e0c3f29e7004d365f8ee2b4f786b7ee892ff81474" Nov 21 20:53:26 crc kubenswrapper[4701]: I1121 20:53:26.712579 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eabbdcf00ca93e774cb2312e0c3f29e7004d365f8ee2b4f786b7ee892ff81474"} err="failed to get container status \"eabbdcf00ca93e774cb2312e0c3f29e7004d365f8ee2b4f786b7ee892ff81474\": rpc error: code = NotFound desc = could not find container \"eabbdcf00ca93e774cb2312e0c3f29e7004d365f8ee2b4f786b7ee892ff81474\": container with ID starting with eabbdcf00ca93e774cb2312e0c3f29e7004d365f8ee2b4f786b7ee892ff81474 not found: ID does not exist" Nov 21 20:53:26 crc kubenswrapper[4701]: I1121 20:53:26.712598 4701 scope.go:117] "RemoveContainer" containerID="72c783963f6ea84951986cf024b25d11f2df2d09b6f7f8cbd957fd4e43538162" Nov 21 20:53:26 crc kubenswrapper[4701]: E1121 20:53:26.712968 4701 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"72c783963f6ea84951986cf024b25d11f2df2d09b6f7f8cbd957fd4e43538162\": container with ID starting with 72c783963f6ea84951986cf024b25d11f2df2d09b6f7f8cbd957fd4e43538162 not found: ID does not exist" containerID="72c783963f6ea84951986cf024b25d11f2df2d09b6f7f8cbd957fd4e43538162" Nov 21 20:53:26 crc kubenswrapper[4701]: I1121 20:53:26.713022 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"72c783963f6ea84951986cf024b25d11f2df2d09b6f7f8cbd957fd4e43538162"} err="failed to get container status \"72c783963f6ea84951986cf024b25d11f2df2d09b6f7f8cbd957fd4e43538162\": rpc error: code = NotFound desc = could not find container \"72c783963f6ea84951986cf024b25d11f2df2d09b6f7f8cbd957fd4e43538162\": container with ID starting with 72c783963f6ea84951986cf024b25d11f2df2d09b6f7f8cbd957fd4e43538162 not found: ID does not exist" Nov 21 20:53:27 crc kubenswrapper[4701]: I1121 20:53:27.965928 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ebebb593-de66-4aa7-9c9a-cad4250dbc65" path="/var/lib/kubelet/pods/ebebb593-de66-4aa7-9c9a-cad4250dbc65/volumes" Nov 21 20:53:28 crc kubenswrapper[4701]: I1121 20:53:28.240352 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-4pc2l" Nov 21 20:53:28 crc kubenswrapper[4701]: I1121 20:53:28.240418 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-4pc2l" Nov 21 20:53:28 crc kubenswrapper[4701]: I1121 20:53:28.324862 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-4pc2l" Nov 21 20:53:28 crc kubenswrapper[4701]: I1121 20:53:28.695589 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-4pc2l" Nov 21 20:53:29 crc kubenswrapper[4701]: I1121 20:53:29.680686 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4pc2l"] Nov 21 20:53:30 crc kubenswrapper[4701]: I1121 20:53:30.651827 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-4pc2l" podUID="483bf6a5-c70e-42ab-b641-c7dcad59f123" containerName="registry-server" containerID="cri-o://e74dcd1348c6f79f77314330bf366f6f209155742706f267e5afe4ab7deceee1" gracePeriod=2 Nov 21 20:53:31 crc kubenswrapper[4701]: I1121 20:53:31.677599 4701 generic.go:334] "Generic (PLEG): container finished" podID="483bf6a5-c70e-42ab-b641-c7dcad59f123" containerID="e74dcd1348c6f79f77314330bf366f6f209155742706f267e5afe4ab7deceee1" exitCode=0 Nov 21 20:53:31 crc kubenswrapper[4701]: I1121 20:53:31.677669 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4pc2l" event={"ID":"483bf6a5-c70e-42ab-b641-c7dcad59f123","Type":"ContainerDied","Data":"e74dcd1348c6f79f77314330bf366f6f209155742706f267e5afe4ab7deceee1"} Nov 21 20:53:31 crc kubenswrapper[4701]: I1121 20:53:31.678316 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4pc2l" event={"ID":"483bf6a5-c70e-42ab-b641-c7dcad59f123","Type":"ContainerDied","Data":"ed076a1fabf9c6f6d00ed1a5a035c5aafbf14edd4ec4fa4ff0aaa7e62ab9bb79"} Nov 21 20:53:31 crc kubenswrapper[4701]: I1121 20:53:31.678336 4701 pod_container_deletor.go:80] "Container not found in pod's containers" 
containerID="ed076a1fabf9c6f6d00ed1a5a035c5aafbf14edd4ec4fa4ff0aaa7e62ab9bb79" Nov 21 20:53:31 crc kubenswrapper[4701]: I1121 20:53:31.760898 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4pc2l" Nov 21 20:53:31 crc kubenswrapper[4701]: I1121 20:53:31.948843 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c6pf5\" (UniqueName: \"kubernetes.io/projected/483bf6a5-c70e-42ab-b641-c7dcad59f123-kube-api-access-c6pf5\") pod \"483bf6a5-c70e-42ab-b641-c7dcad59f123\" (UID: \"483bf6a5-c70e-42ab-b641-c7dcad59f123\") " Nov 21 20:53:31 crc kubenswrapper[4701]: I1121 20:53:31.948950 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/483bf6a5-c70e-42ab-b641-c7dcad59f123-utilities\") pod \"483bf6a5-c70e-42ab-b641-c7dcad59f123\" (UID: \"483bf6a5-c70e-42ab-b641-c7dcad59f123\") " Nov 21 20:53:31 crc kubenswrapper[4701]: I1121 20:53:31.949172 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/483bf6a5-c70e-42ab-b641-c7dcad59f123-catalog-content\") pod \"483bf6a5-c70e-42ab-b641-c7dcad59f123\" (UID: \"483bf6a5-c70e-42ab-b641-c7dcad59f123\") " Nov 21 20:53:31 crc kubenswrapper[4701]: I1121 20:53:31.950083 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/483bf6a5-c70e-42ab-b641-c7dcad59f123-utilities" (OuterVolumeSpecName: "utilities") pod "483bf6a5-c70e-42ab-b641-c7dcad59f123" (UID: "483bf6a5-c70e-42ab-b641-c7dcad59f123"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:53:31 crc kubenswrapper[4701]: I1121 20:53:31.965963 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/483bf6a5-c70e-42ab-b641-c7dcad59f123-kube-api-access-c6pf5" (OuterVolumeSpecName: "kube-api-access-c6pf5") pod "483bf6a5-c70e-42ab-b641-c7dcad59f123" (UID: "483bf6a5-c70e-42ab-b641-c7dcad59f123"). InnerVolumeSpecName "kube-api-access-c6pf5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 20:53:31 crc kubenswrapper[4701]: I1121 20:53:31.986618 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/483bf6a5-c70e-42ab-b641-c7dcad59f123-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "483bf6a5-c70e-42ab-b641-c7dcad59f123" (UID: "483bf6a5-c70e-42ab-b641-c7dcad59f123"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:53:32 crc kubenswrapper[4701]: I1121 20:53:32.052646 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c6pf5\" (UniqueName: \"kubernetes.io/projected/483bf6a5-c70e-42ab-b641-c7dcad59f123-kube-api-access-c6pf5\") on node \"crc\" DevicePath \"\"" Nov 21 20:53:32 crc kubenswrapper[4701]: I1121 20:53:32.052692 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/483bf6a5-c70e-42ab-b641-c7dcad59f123-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 20:53:32 crc kubenswrapper[4701]: I1121 20:53:32.052703 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/483bf6a5-c70e-42ab-b641-c7dcad59f123-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 20:53:32 crc kubenswrapper[4701]: I1121 20:53:32.689041 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4pc2l" Nov 21 20:53:32 crc kubenswrapper[4701]: I1121 20:53:32.735608 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4pc2l"] Nov 21 20:53:32 crc kubenswrapper[4701]: I1121 20:53:32.751190 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-4pc2l"] Nov 21 20:53:33 crc kubenswrapper[4701]: I1121 20:53:33.971527 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="483bf6a5-c70e-42ab-b641-c7dcad59f123" path="/var/lib/kubelet/pods/483bf6a5-c70e-42ab-b641-c7dcad59f123/volumes" Nov 21 20:53:34 crc kubenswrapper[4701]: I1121 20:53:34.531100 4701 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-nflgq" podUID="d8134667-5d90-4a16-93de-27396a2d515f" containerName="registry-server" probeResult="failure" output=< Nov 21 20:53:34 crc kubenswrapper[4701]: timeout: failed to connect service ":50051" within 1s Nov 21 20:53:34 crc kubenswrapper[4701]: > Nov 21 20:53:37 crc kubenswrapper[4701]: I1121 20:53:37.963994 4701 scope.go:117] "RemoveContainer" containerID="3d797f0000302550ad2e1909fd94b82c08f3845841d76d73d89e762960062c62" Nov 21 20:53:37 crc kubenswrapper[4701]: E1121 20:53:37.965376 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:53:43 crc kubenswrapper[4701]: I1121 20:53:43.558414 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-nflgq" Nov 21 20:53:43 crc kubenswrapper[4701]: I1121 20:53:43.632478 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-nflgq" Nov 21 20:53:44 crc kubenswrapper[4701]: I1121 20:53:44.114351 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nflgq"] Nov 21 20:53:44 crc kubenswrapper[4701]: I1121 20:53:44.855431 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-nflgq" podUID="d8134667-5d90-4a16-93de-27396a2d515f" 
containerName="registry-server" containerID="cri-o://a038b1643523266adb533a283867bb68e5392cfef9132c70fb1b6a0c5733e417" gracePeriod=2 Nov 21 20:53:45 crc kubenswrapper[4701]: I1121 20:53:45.471496 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nflgq" Nov 21 20:53:45 crc kubenswrapper[4701]: I1121 20:53:45.615645 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8134667-5d90-4a16-93de-27396a2d515f-catalog-content\") pod \"d8134667-5d90-4a16-93de-27396a2d515f\" (UID: \"d8134667-5d90-4a16-93de-27396a2d515f\") " Nov 21 20:53:45 crc kubenswrapper[4701]: I1121 20:53:45.621824 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8134667-5d90-4a16-93de-27396a2d515f-utilities\") pod \"d8134667-5d90-4a16-93de-27396a2d515f\" (UID: \"d8134667-5d90-4a16-93de-27396a2d515f\") " Nov 21 20:53:45 crc kubenswrapper[4701]: I1121 20:53:45.622563 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-57m7l\" (UniqueName: \"kubernetes.io/projected/d8134667-5d90-4a16-93de-27396a2d515f-kube-api-access-57m7l\") pod \"d8134667-5d90-4a16-93de-27396a2d515f\" (UID: \"d8134667-5d90-4a16-93de-27396a2d515f\") " Nov 21 20:53:45 crc kubenswrapper[4701]: I1121 20:53:45.622907 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d8134667-5d90-4a16-93de-27396a2d515f-utilities" (OuterVolumeSpecName: "utilities") pod "d8134667-5d90-4a16-93de-27396a2d515f" (UID: "d8134667-5d90-4a16-93de-27396a2d515f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:53:45 crc kubenswrapper[4701]: I1121 20:53:45.623637 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8134667-5d90-4a16-93de-27396a2d515f-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 20:53:45 crc kubenswrapper[4701]: I1121 20:53:45.634648 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8134667-5d90-4a16-93de-27396a2d515f-kube-api-access-57m7l" (OuterVolumeSpecName: "kube-api-access-57m7l") pod "d8134667-5d90-4a16-93de-27396a2d515f" (UID: "d8134667-5d90-4a16-93de-27396a2d515f"). InnerVolumeSpecName "kube-api-access-57m7l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 20:53:45 crc kubenswrapper[4701]: I1121 20:53:45.700824 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d8134667-5d90-4a16-93de-27396a2d515f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d8134667-5d90-4a16-93de-27396a2d515f" (UID: "d8134667-5d90-4a16-93de-27396a2d515f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:53:45 crc kubenswrapper[4701]: I1121 20:53:45.725257 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8134667-5d90-4a16-93de-27396a2d515f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 20:53:45 crc kubenswrapper[4701]: I1121 20:53:45.725284 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-57m7l\" (UniqueName: \"kubernetes.io/projected/d8134667-5d90-4a16-93de-27396a2d515f-kube-api-access-57m7l\") on node \"crc\" DevicePath \"\"" Nov 21 20:53:45 crc kubenswrapper[4701]: I1121 20:53:45.871529 4701 generic.go:334] "Generic (PLEG): container finished" podID="d8134667-5d90-4a16-93de-27396a2d515f" containerID="a038b1643523266adb533a283867bb68e5392cfef9132c70fb1b6a0c5733e417" exitCode=0 Nov 21 20:53:45 crc kubenswrapper[4701]: I1121 20:53:45.871583 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nflgq" event={"ID":"d8134667-5d90-4a16-93de-27396a2d515f","Type":"ContainerDied","Data":"a038b1643523266adb533a283867bb68e5392cfef9132c70fb1b6a0c5733e417"} Nov 21 20:53:45 crc kubenswrapper[4701]: I1121 20:53:45.871602 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nflgq" Nov 21 20:53:45 crc kubenswrapper[4701]: I1121 20:53:45.871638 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nflgq" event={"ID":"d8134667-5d90-4a16-93de-27396a2d515f","Type":"ContainerDied","Data":"3340ed985ad7a17113bd5162841369e3b4565102a66a2396e05a6e3f07ca0702"} Nov 21 20:53:45 crc kubenswrapper[4701]: I1121 20:53:45.871662 4701 scope.go:117] "RemoveContainer" containerID="a038b1643523266adb533a283867bb68e5392cfef9132c70fb1b6a0c5733e417" Nov 21 20:53:45 crc kubenswrapper[4701]: I1121 20:53:45.927546 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nflgq"] Nov 21 20:53:45 crc kubenswrapper[4701]: I1121 20:53:45.933992 4701 scope.go:117] "RemoveContainer" containerID="196f8840390555ac1450a06821452c67262b39e6171f54fc6876910d6103d8f7" Nov 21 20:53:45 crc kubenswrapper[4701]: I1121 20:53:45.936922 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-nflgq"] Nov 21 20:53:45 crc kubenswrapper[4701]: I1121 20:53:45.962679 4701 scope.go:117] "RemoveContainer" containerID="8a98bb7315d6d70f11ac3aa31ad6953fc99c1a6ce2b1fd24efb6cc65079970b9" Nov 21 20:53:45 crc kubenswrapper[4701]: I1121 20:53:45.976025 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8134667-5d90-4a16-93de-27396a2d515f" path="/var/lib/kubelet/pods/d8134667-5d90-4a16-93de-27396a2d515f/volumes" Nov 21 20:53:46 crc kubenswrapper[4701]: I1121 20:53:46.038979 4701 scope.go:117] "RemoveContainer" containerID="a038b1643523266adb533a283867bb68e5392cfef9132c70fb1b6a0c5733e417" Nov 21 20:53:46 crc kubenswrapper[4701]: E1121 20:53:46.039570 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a038b1643523266adb533a283867bb68e5392cfef9132c70fb1b6a0c5733e417\": container with ID starting with a038b1643523266adb533a283867bb68e5392cfef9132c70fb1b6a0c5733e417 not found: ID does not exist" containerID="a038b1643523266adb533a283867bb68e5392cfef9132c70fb1b6a0c5733e417" Nov 21 20:53:46 crc kubenswrapper[4701]: I1121 20:53:46.039628 4701 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a038b1643523266adb533a283867bb68e5392cfef9132c70fb1b6a0c5733e417"} err="failed to get container status \"a038b1643523266adb533a283867bb68e5392cfef9132c70fb1b6a0c5733e417\": rpc error: code = NotFound desc = could not find container \"a038b1643523266adb533a283867bb68e5392cfef9132c70fb1b6a0c5733e417\": container with ID starting with a038b1643523266adb533a283867bb68e5392cfef9132c70fb1b6a0c5733e417 not found: ID does not exist" Nov 21 20:53:46 crc kubenswrapper[4701]: I1121 20:53:46.039666 4701 scope.go:117] "RemoveContainer" containerID="196f8840390555ac1450a06821452c67262b39e6171f54fc6876910d6103d8f7" Nov 21 20:53:46 crc kubenswrapper[4701]: E1121 20:53:46.040003 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"196f8840390555ac1450a06821452c67262b39e6171f54fc6876910d6103d8f7\": container with ID starting with 196f8840390555ac1450a06821452c67262b39e6171f54fc6876910d6103d8f7 not found: ID does not exist" containerID="196f8840390555ac1450a06821452c67262b39e6171f54fc6876910d6103d8f7" Nov 21 20:53:46 crc kubenswrapper[4701]: I1121 20:53:46.040059 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"196f8840390555ac1450a06821452c67262b39e6171f54fc6876910d6103d8f7"} err="failed to get container status \"196f8840390555ac1450a06821452c67262b39e6171f54fc6876910d6103d8f7\": rpc error: code = NotFound desc = could not find container \"196f8840390555ac1450a06821452c67262b39e6171f54fc6876910d6103d8f7\": container with ID starting with 196f8840390555ac1450a06821452c67262b39e6171f54fc6876910d6103d8f7 not found: ID does not exist" Nov 21 20:53:46 crc kubenswrapper[4701]: I1121 20:53:46.040085 4701 scope.go:117] "RemoveContainer" containerID="8a98bb7315d6d70f11ac3aa31ad6953fc99c1a6ce2b1fd24efb6cc65079970b9" Nov 21 20:53:46 crc kubenswrapper[4701]: E1121 20:53:46.040456 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8a98bb7315d6d70f11ac3aa31ad6953fc99c1a6ce2b1fd24efb6cc65079970b9\": container with ID starting with 8a98bb7315d6d70f11ac3aa31ad6953fc99c1a6ce2b1fd24efb6cc65079970b9 not found: ID does not exist" containerID="8a98bb7315d6d70f11ac3aa31ad6953fc99c1a6ce2b1fd24efb6cc65079970b9" Nov 21 20:53:46 crc kubenswrapper[4701]: I1121 20:53:46.040516 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8a98bb7315d6d70f11ac3aa31ad6953fc99c1a6ce2b1fd24efb6cc65079970b9"} err="failed to get container status \"8a98bb7315d6d70f11ac3aa31ad6953fc99c1a6ce2b1fd24efb6cc65079970b9\": rpc error: code = NotFound desc = could not find container \"8a98bb7315d6d70f11ac3aa31ad6953fc99c1a6ce2b1fd24efb6cc65079970b9\": container with ID starting with 8a98bb7315d6d70f11ac3aa31ad6953fc99c1a6ce2b1fd24efb6cc65079970b9 not found: ID does not exist" Nov 21 20:53:49 crc kubenswrapper[4701]: I1121 20:53:49.963786 4701 scope.go:117] "RemoveContainer" containerID="3d797f0000302550ad2e1909fd94b82c08f3845841d76d73d89e762960062c62" Nov 21 20:53:49 crc kubenswrapper[4701]: E1121 20:53:49.964988 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:54:03 crc kubenswrapper[4701]: I1121 20:54:03.952721 4701 scope.go:117] "RemoveContainer" containerID="3d797f0000302550ad2e1909fd94b82c08f3845841d76d73d89e762960062c62" Nov 21 20:54:03 crc kubenswrapper[4701]: E1121 20:54:03.954441 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:54:06 crc kubenswrapper[4701]: I1121 20:54:06.173577 4701 generic.go:334] "Generic (PLEG): container finished" podID="85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a" containerID="9e65fbc771a52d5058e4a191042f9d704229cea96833151cb2eba56970f79e33" exitCode=0 Nov 21 20:54:06 crc kubenswrapper[4701]: I1121 20:54:06.173662 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-scggw/must-gather-lv2mj" event={"ID":"85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a","Type":"ContainerDied","Data":"9e65fbc771a52d5058e4a191042f9d704229cea96833151cb2eba56970f79e33"} Nov 21 20:54:06 crc kubenswrapper[4701]: I1121 20:54:06.174720 4701 scope.go:117] "RemoveContainer" containerID="9e65fbc771a52d5058e4a191042f9d704229cea96833151cb2eba56970f79e33" Nov 21 20:54:06 crc kubenswrapper[4701]: I1121 20:54:06.902611 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-scggw_must-gather-lv2mj_85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a/gather/0.log" Nov 21 20:54:15 crc kubenswrapper[4701]: I1121 20:54:15.952486 4701 scope.go:117] "RemoveContainer" containerID="3d797f0000302550ad2e1909fd94b82c08f3845841d76d73d89e762960062c62" Nov 21 20:54:15 crc kubenswrapper[4701]: E1121 20:54:15.954549 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:54:21 crc kubenswrapper[4701]: I1121 20:54:21.406922 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-scggw/must-gather-lv2mj"] Nov 21 20:54:21 crc kubenswrapper[4701]: I1121 20:54:21.408623 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-scggw/must-gather-lv2mj" podUID="85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a" containerName="copy" containerID="cri-o://96e91fa25045e54efcd740e569948d8e8c37e0358d6aaaa127d126c5f7b0f290" gracePeriod=2 Nov 21 20:54:21 crc kubenswrapper[4701]: I1121 20:54:21.429734 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-scggw/must-gather-lv2mj"] Nov 21 20:54:21 crc kubenswrapper[4701]: I1121 20:54:21.988834 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-scggw_must-gather-lv2mj_85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a/copy/0.log" Nov 21 20:54:21 crc kubenswrapper[4701]: I1121 20:54:21.989538 4701 util.go:48] "No ready 
sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-scggw/must-gather-lv2mj" Nov 21 20:54:22 crc kubenswrapper[4701]: I1121 20:54:22.159123 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a-must-gather-output\") pod \"85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a\" (UID: \"85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a\") " Nov 21 20:54:22 crc kubenswrapper[4701]: I1121 20:54:22.159331 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vszxn\" (UniqueName: \"kubernetes.io/projected/85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a-kube-api-access-vszxn\") pod \"85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a\" (UID: \"85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a\") " Nov 21 20:54:22 crc kubenswrapper[4701]: I1121 20:54:22.169519 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a-kube-api-access-vszxn" (OuterVolumeSpecName: "kube-api-access-vszxn") pod "85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a" (UID: "85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a"). InnerVolumeSpecName "kube-api-access-vszxn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 20:54:22 crc kubenswrapper[4701]: I1121 20:54:22.262025 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vszxn\" (UniqueName: \"kubernetes.io/projected/85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a-kube-api-access-vszxn\") on node \"crc\" DevicePath \"\"" Nov 21 20:54:22 crc kubenswrapper[4701]: I1121 20:54:22.412787 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a" (UID: "85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 20:54:22 crc kubenswrapper[4701]: I1121 20:54:22.420279 4701 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-scggw_must-gather-lv2mj_85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a/copy/0.log" Nov 21 20:54:22 crc kubenswrapper[4701]: I1121 20:54:22.420697 4701 generic.go:334] "Generic (PLEG): container finished" podID="85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a" containerID="96e91fa25045e54efcd740e569948d8e8c37e0358d6aaaa127d126c5f7b0f290" exitCode=143 Nov 21 20:54:22 crc kubenswrapper[4701]: I1121 20:54:22.420775 4701 scope.go:117] "RemoveContainer" containerID="96e91fa25045e54efcd740e569948d8e8c37e0358d6aaaa127d126c5f7b0f290" Nov 21 20:54:22 crc kubenswrapper[4701]: I1121 20:54:22.420854 4701 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-scggw/must-gather-lv2mj" Nov 21 20:54:22 crc kubenswrapper[4701]: I1121 20:54:22.448494 4701 scope.go:117] "RemoveContainer" containerID="9e65fbc771a52d5058e4a191042f9d704229cea96833151cb2eba56970f79e33" Nov 21 20:54:22 crc kubenswrapper[4701]: I1121 20:54:22.469133 4701 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 21 20:54:22 crc kubenswrapper[4701]: I1121 20:54:22.517946 4701 scope.go:117] "RemoveContainer" containerID="96e91fa25045e54efcd740e569948d8e8c37e0358d6aaaa127d126c5f7b0f290" Nov 21 20:54:22 crc kubenswrapper[4701]: E1121 20:54:22.518742 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"96e91fa25045e54efcd740e569948d8e8c37e0358d6aaaa127d126c5f7b0f290\": container with ID starting with 96e91fa25045e54efcd740e569948d8e8c37e0358d6aaaa127d126c5f7b0f290 not found: ID does not exist" containerID="96e91fa25045e54efcd740e569948d8e8c37e0358d6aaaa127d126c5f7b0f290" Nov 21 20:54:22 crc kubenswrapper[4701]: I1121 20:54:22.518797 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"96e91fa25045e54efcd740e569948d8e8c37e0358d6aaaa127d126c5f7b0f290"} err="failed to get container status \"96e91fa25045e54efcd740e569948d8e8c37e0358d6aaaa127d126c5f7b0f290\": rpc error: code = NotFound desc = could not find container \"96e91fa25045e54efcd740e569948d8e8c37e0358d6aaaa127d126c5f7b0f290\": container with ID starting with 96e91fa25045e54efcd740e569948d8e8c37e0358d6aaaa127d126c5f7b0f290 not found: ID does not exist" Nov 21 20:54:22 crc kubenswrapper[4701]: I1121 20:54:22.518830 4701 scope.go:117] "RemoveContainer" containerID="9e65fbc771a52d5058e4a191042f9d704229cea96833151cb2eba56970f79e33" Nov 21 20:54:22 crc kubenswrapper[4701]: E1121 20:54:22.519375 4701 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9e65fbc771a52d5058e4a191042f9d704229cea96833151cb2eba56970f79e33\": container with ID starting with 9e65fbc771a52d5058e4a191042f9d704229cea96833151cb2eba56970f79e33 not found: ID does not exist" containerID="9e65fbc771a52d5058e4a191042f9d704229cea96833151cb2eba56970f79e33" Nov 21 20:54:22 crc kubenswrapper[4701]: I1121 20:54:22.519552 4701 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e65fbc771a52d5058e4a191042f9d704229cea96833151cb2eba56970f79e33"} err="failed to get container status \"9e65fbc771a52d5058e4a191042f9d704229cea96833151cb2eba56970f79e33\": rpc error: code = NotFound desc = could not find container \"9e65fbc771a52d5058e4a191042f9d704229cea96833151cb2eba56970f79e33\": container with ID starting with 9e65fbc771a52d5058e4a191042f9d704229cea96833151cb2eba56970f79e33 not found: ID does not exist" Nov 21 20:54:23 crc kubenswrapper[4701]: I1121 20:54:23.974674 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a" path="/var/lib/kubelet/pods/85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a/volumes" Nov 21 20:54:28 crc kubenswrapper[4701]: I1121 20:54:28.952980 4701 scope.go:117] "RemoveContainer" containerID="3d797f0000302550ad2e1909fd94b82c08f3845841d76d73d89e762960062c62" Nov 21 20:54:28 crc kubenswrapper[4701]: E1121 20:54:28.955088 4701 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" Nov 21 20:54:39 crc kubenswrapper[4701]: I1121 20:54:39.325811 4701 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-5vz7w"] Nov 21 20:54:39 crc kubenswrapper[4701]: E1121 20:54:39.328517 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="483bf6a5-c70e-42ab-b641-c7dcad59f123" containerName="extract-content" Nov 21 20:54:39 crc kubenswrapper[4701]: I1121 20:54:39.328534 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="483bf6a5-c70e-42ab-b641-c7dcad59f123" containerName="extract-content" Nov 21 20:54:39 crc kubenswrapper[4701]: E1121 20:54:39.328547 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ebebb593-de66-4aa7-9c9a-cad4250dbc65" containerName="extract-content" Nov 21 20:54:39 crc kubenswrapper[4701]: I1121 20:54:39.328570 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="ebebb593-de66-4aa7-9c9a-cad4250dbc65" containerName="extract-content" Nov 21 20:54:39 crc kubenswrapper[4701]: E1121 20:54:39.328582 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a" containerName="gather" Nov 21 20:54:39 crc kubenswrapper[4701]: I1121 20:54:39.328598 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a" containerName="gather" Nov 21 20:54:39 crc kubenswrapper[4701]: E1121 20:54:39.328616 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ebebb593-de66-4aa7-9c9a-cad4250dbc65" containerName="extract-utilities" Nov 21 20:54:39 crc kubenswrapper[4701]: I1121 20:54:39.328639 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="ebebb593-de66-4aa7-9c9a-cad4250dbc65" containerName="extract-utilities" Nov 21 20:54:39 crc kubenswrapper[4701]: E1121 20:54:39.328652 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8134667-5d90-4a16-93de-27396a2d515f" containerName="registry-server" Nov 21 20:54:39 crc kubenswrapper[4701]: I1121 20:54:39.328659 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8134667-5d90-4a16-93de-27396a2d515f" containerName="registry-server" Nov 21 20:54:39 crc kubenswrapper[4701]: E1121 20:54:39.328688 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a" containerName="copy" Nov 21 20:54:39 crc kubenswrapper[4701]: I1121 20:54:39.328709 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a" containerName="copy" Nov 21 20:54:39 crc kubenswrapper[4701]: E1121 20:54:39.328744 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="483bf6a5-c70e-42ab-b641-c7dcad59f123" containerName="extract-utilities" Nov 21 20:54:39 crc kubenswrapper[4701]: I1121 20:54:39.328750 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="483bf6a5-c70e-42ab-b641-c7dcad59f123" containerName="extract-utilities" Nov 21 20:54:39 crc kubenswrapper[4701]: E1121 20:54:39.328760 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8134667-5d90-4a16-93de-27396a2d515f" containerName="extract-utilities" Nov 21 20:54:39 crc kubenswrapper[4701]: I1121 
20:54:39.328766 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8134667-5d90-4a16-93de-27396a2d515f" containerName="extract-utilities"
Nov 21 20:54:39 crc kubenswrapper[4701]: E1121 20:54:39.328788 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="483bf6a5-c70e-42ab-b641-c7dcad59f123" containerName="registry-server"
Nov 21 20:54:39 crc kubenswrapper[4701]: I1121 20:54:39.328794 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="483bf6a5-c70e-42ab-b641-c7dcad59f123" containerName="registry-server"
Nov 21 20:54:39 crc kubenswrapper[4701]: E1121 20:54:39.328809 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8134667-5d90-4a16-93de-27396a2d515f" containerName="extract-content"
Nov 21 20:54:39 crc kubenswrapper[4701]: I1121 20:54:39.328815 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8134667-5d90-4a16-93de-27396a2d515f" containerName="extract-content"
Nov 21 20:54:39 crc kubenswrapper[4701]: E1121 20:54:39.328830 4701 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ebebb593-de66-4aa7-9c9a-cad4250dbc65" containerName="registry-server"
Nov 21 20:54:39 crc kubenswrapper[4701]: I1121 20:54:39.328837 4701 state_mem.go:107] "Deleted CPUSet assignment" podUID="ebebb593-de66-4aa7-9c9a-cad4250dbc65" containerName="registry-server"
Nov 21 20:54:39 crc kubenswrapper[4701]: I1121 20:54:39.329148 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="483bf6a5-c70e-42ab-b641-c7dcad59f123" containerName="registry-server"
Nov 21 20:54:39 crc kubenswrapper[4701]: I1121 20:54:39.329175 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a" containerName="copy"
Nov 21 20:54:39 crc kubenswrapper[4701]: I1121 20:54:39.329186 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="85c412eb-d5a7-4a82-9f26-4dc70fbbaf8a" containerName="gather"
Nov 21 20:54:39 crc kubenswrapper[4701]: I1121 20:54:39.329227 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="ebebb593-de66-4aa7-9c9a-cad4250dbc65" containerName="registry-server"
Nov 21 20:54:39 crc kubenswrapper[4701]: I1121 20:54:39.329238 4701 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8134667-5d90-4a16-93de-27396a2d515f" containerName="registry-server"
Nov 21 20:54:39 crc kubenswrapper[4701]: I1121 20:54:39.331361 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5vz7w"
Nov 21 20:54:39 crc kubenswrapper[4701]: I1121 20:54:39.356909 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5vz7w"]
Nov 21 20:54:39 crc kubenswrapper[4701]: I1121 20:54:39.449854 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rfsxk\" (UniqueName: \"kubernetes.io/projected/29a6f30a-3263-48fe-90c5-91ec607eeb16-kube-api-access-rfsxk\") pod \"community-operators-5vz7w\" (UID: \"29a6f30a-3263-48fe-90c5-91ec607eeb16\") " pod="openshift-marketplace/community-operators-5vz7w"
Nov 21 20:54:39 crc kubenswrapper[4701]: I1121 20:54:39.450034 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29a6f30a-3263-48fe-90c5-91ec607eeb16-utilities\") pod \"community-operators-5vz7w\" (UID: \"29a6f30a-3263-48fe-90c5-91ec607eeb16\") " pod="openshift-marketplace/community-operators-5vz7w"
Nov 21 20:54:39 crc kubenswrapper[4701]: I1121 20:54:39.450180 4701 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29a6f30a-3263-48fe-90c5-91ec607eeb16-catalog-content\") pod \"community-operators-5vz7w\" (UID: \"29a6f30a-3263-48fe-90c5-91ec607eeb16\") " pod="openshift-marketplace/community-operators-5vz7w"
Nov 21 20:54:39 crc kubenswrapper[4701]: I1121 20:54:39.553880 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29a6f30a-3263-48fe-90c5-91ec607eeb16-utilities\") pod \"community-operators-5vz7w\" (UID: \"29a6f30a-3263-48fe-90c5-91ec607eeb16\") " pod="openshift-marketplace/community-operators-5vz7w"
Nov 21 20:54:39 crc kubenswrapper[4701]: I1121 20:54:39.554063 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29a6f30a-3263-48fe-90c5-91ec607eeb16-catalog-content\") pod \"community-operators-5vz7w\" (UID: \"29a6f30a-3263-48fe-90c5-91ec607eeb16\") " pod="openshift-marketplace/community-operators-5vz7w"
Nov 21 20:54:39 crc kubenswrapper[4701]: I1121 20:54:39.554103 4701 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rfsxk\" (UniqueName: \"kubernetes.io/projected/29a6f30a-3263-48fe-90c5-91ec607eeb16-kube-api-access-rfsxk\") pod \"community-operators-5vz7w\" (UID: \"29a6f30a-3263-48fe-90c5-91ec607eeb16\") " pod="openshift-marketplace/community-operators-5vz7w"
Nov 21 20:54:39 crc kubenswrapper[4701]: I1121 20:54:39.555045 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29a6f30a-3263-48fe-90c5-91ec607eeb16-utilities\") pod \"community-operators-5vz7w\" (UID: \"29a6f30a-3263-48fe-90c5-91ec607eeb16\") " pod="openshift-marketplace/community-operators-5vz7w"
Nov 21 20:54:39 crc kubenswrapper[4701]: I1121 20:54:39.555366 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29a6f30a-3263-48fe-90c5-91ec607eeb16-catalog-content\") pod \"community-operators-5vz7w\" (UID: \"29a6f30a-3263-48fe-90c5-91ec607eeb16\") " pod="openshift-marketplace/community-operators-5vz7w"
Nov 21 20:54:39 crc kubenswrapper[4701]: I1121 20:54:39.576598 4701 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rfsxk\" (UniqueName: \"kubernetes.io/projected/29a6f30a-3263-48fe-90c5-91ec607eeb16-kube-api-access-rfsxk\") pod \"community-operators-5vz7w\" (UID: \"29a6f30a-3263-48fe-90c5-91ec607eeb16\") " pod="openshift-marketplace/community-operators-5vz7w"
Nov 21 20:54:39 crc kubenswrapper[4701]: I1121 20:54:39.664522 4701 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5vz7w"
Nov 21 20:54:40 crc kubenswrapper[4701]: I1121 20:54:40.227934 4701 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5vz7w"]
Nov 21 20:54:40 crc kubenswrapper[4701]: I1121 20:54:40.679335 4701 generic.go:334] "Generic (PLEG): container finished" podID="29a6f30a-3263-48fe-90c5-91ec607eeb16" containerID="1cfce1999a1b41f136f31f57ad157709030c7415c0cbce681ea3a2d3da47511e" exitCode=0
Nov 21 20:54:40 crc kubenswrapper[4701]: I1121 20:54:40.679404 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5vz7w" event={"ID":"29a6f30a-3263-48fe-90c5-91ec607eeb16","Type":"ContainerDied","Data":"1cfce1999a1b41f136f31f57ad157709030c7415c0cbce681ea3a2d3da47511e"}
Nov 21 20:54:40 crc kubenswrapper[4701]: I1121 20:54:40.679443 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5vz7w" event={"ID":"29a6f30a-3263-48fe-90c5-91ec607eeb16","Type":"ContainerStarted","Data":"2512febcdd08faa1dbe8668f69fc19484ae4811fb730f393712afc7dcbad1587"}
Nov 21 20:54:41 crc kubenswrapper[4701]: I1121 20:54:41.696144 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5vz7w" event={"ID":"29a6f30a-3263-48fe-90c5-91ec607eeb16","Type":"ContainerStarted","Data":"ddc5a2db1e17bd404fef6097e6d018e9417d0819833934a5b21b33dceae5e92f"}
Nov 21 20:54:43 crc kubenswrapper[4701]: I1121 20:54:43.726855 4701 generic.go:334] "Generic (PLEG): container finished" podID="29a6f30a-3263-48fe-90c5-91ec607eeb16" containerID="ddc5a2db1e17bd404fef6097e6d018e9417d0819833934a5b21b33dceae5e92f" exitCode=0
Nov 21 20:54:43 crc kubenswrapper[4701]: I1121 20:54:43.726938 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5vz7w" event={"ID":"29a6f30a-3263-48fe-90c5-91ec607eeb16","Type":"ContainerDied","Data":"ddc5a2db1e17bd404fef6097e6d018e9417d0819833934a5b21b33dceae5e92f"}
Nov 21 20:54:43 crc kubenswrapper[4701]: I1121 20:54:43.952128 4701 scope.go:117] "RemoveContainer" containerID="3d797f0000302550ad2e1909fd94b82c08f3845841d76d73d89e762960062c62"
Nov 21 20:54:43 crc kubenswrapper[4701]: E1121 20:54:43.952518 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614"
Nov 21 20:54:44 crc kubenswrapper[4701]: I1121 20:54:44.761804 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5vz7w" event={"ID":"29a6f30a-3263-48fe-90c5-91ec607eeb16","Type":"ContainerStarted","Data":"a1115d9056645df0ee068e94fe462f88f20da15c2f6845557ef043301d8cdb9f"}
Nov 21 20:54:44 crc kubenswrapper[4701]: I1121 20:54:44.801998 4701 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-5vz7w" podStartSLOduration=2.35158875 podStartE2EDuration="5.801970761s" podCreationTimestamp="2025-11-21 20:54:39 +0000 UTC" firstStartedPulling="2025-11-21 20:54:40.685128693 +0000 UTC m=+6771.470268730" lastFinishedPulling="2025-11-21 20:54:44.135510674 +0000 UTC m=+6774.920650741" observedRunningTime="2025-11-21 20:54:44.787585327 +0000 UTC m=+6775.572725394" watchObservedRunningTime="2025-11-21 20:54:44.801970761 +0000 UTC m=+6775.587110798"
Nov 21 20:54:49 crc kubenswrapper[4701]: I1121 20:54:49.665671 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-5vz7w"
Nov 21 20:54:49 crc kubenswrapper[4701]: I1121 20:54:49.667476 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-5vz7w"
Nov 21 20:54:49 crc kubenswrapper[4701]: I1121 20:54:49.720791 4701 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-5vz7w"
Nov 21 20:54:49 crc kubenswrapper[4701]: I1121 20:54:49.906893 4701 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-5vz7w"
Nov 21 20:54:50 crc kubenswrapper[4701]: I1121 20:54:50.001433 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5vz7w"]
Nov 21 20:54:51 crc kubenswrapper[4701]: I1121 20:54:51.857441 4701 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-5vz7w" podUID="29a6f30a-3263-48fe-90c5-91ec607eeb16" containerName="registry-server" containerID="cri-o://a1115d9056645df0ee068e94fe462f88f20da15c2f6845557ef043301d8cdb9f" gracePeriod=2
Nov 21 20:54:52 crc kubenswrapper[4701]: I1121 20:54:52.871162 4701 generic.go:334] "Generic (PLEG): container finished" podID="29a6f30a-3263-48fe-90c5-91ec607eeb16" containerID="a1115d9056645df0ee068e94fe462f88f20da15c2f6845557ef043301d8cdb9f" exitCode=0
Nov 21 20:54:52 crc kubenswrapper[4701]: I1121 20:54:52.871241 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5vz7w" event={"ID":"29a6f30a-3263-48fe-90c5-91ec607eeb16","Type":"ContainerDied","Data":"a1115d9056645df0ee068e94fe462f88f20da15c2f6845557ef043301d8cdb9f"}
Nov 21 20:54:52 crc kubenswrapper[4701]: I1121 20:54:52.871736 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5vz7w" event={"ID":"29a6f30a-3263-48fe-90c5-91ec607eeb16","Type":"ContainerDied","Data":"2512febcdd08faa1dbe8668f69fc19484ae4811fb730f393712afc7dcbad1587"}
Nov 21 20:54:52 crc kubenswrapper[4701]: I1121 20:54:52.871766 4701 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2512febcdd08faa1dbe8668f69fc19484ae4811fb730f393712afc7dcbad1587"
Nov 21 20:54:52 crc kubenswrapper[4701]: I1121 20:54:52.957698 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5vz7w"
Nov 21 20:54:53 crc kubenswrapper[4701]: I1121 20:54:53.076557 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rfsxk\" (UniqueName: \"kubernetes.io/projected/29a6f30a-3263-48fe-90c5-91ec607eeb16-kube-api-access-rfsxk\") pod \"29a6f30a-3263-48fe-90c5-91ec607eeb16\" (UID: \"29a6f30a-3263-48fe-90c5-91ec607eeb16\") "
Nov 21 20:54:53 crc kubenswrapper[4701]: I1121 20:54:53.076643 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29a6f30a-3263-48fe-90c5-91ec607eeb16-utilities\") pod \"29a6f30a-3263-48fe-90c5-91ec607eeb16\" (UID: \"29a6f30a-3263-48fe-90c5-91ec607eeb16\") "
Nov 21 20:54:53 crc kubenswrapper[4701]: I1121 20:54:53.077194 4701 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29a6f30a-3263-48fe-90c5-91ec607eeb16-catalog-content\") pod \"29a6f30a-3263-48fe-90c5-91ec607eeb16\" (UID: \"29a6f30a-3263-48fe-90c5-91ec607eeb16\") "
Nov 21 20:54:53 crc kubenswrapper[4701]: I1121 20:54:53.077682 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/29a6f30a-3263-48fe-90c5-91ec607eeb16-utilities" (OuterVolumeSpecName: "utilities") pod "29a6f30a-3263-48fe-90c5-91ec607eeb16" (UID: "29a6f30a-3263-48fe-90c5-91ec607eeb16"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 20:54:53 crc kubenswrapper[4701]: I1121 20:54:53.077850 4701 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29a6f30a-3263-48fe-90c5-91ec607eeb16-utilities\") on node \"crc\" DevicePath \"\""
Nov 21 20:54:53 crc kubenswrapper[4701]: I1121 20:54:53.083648 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29a6f30a-3263-48fe-90c5-91ec607eeb16-kube-api-access-rfsxk" (OuterVolumeSpecName: "kube-api-access-rfsxk") pod "29a6f30a-3263-48fe-90c5-91ec607eeb16" (UID: "29a6f30a-3263-48fe-90c5-91ec607eeb16"). InnerVolumeSpecName "kube-api-access-rfsxk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 20:54:53 crc kubenswrapper[4701]: I1121 20:54:53.158828 4701 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/29a6f30a-3263-48fe-90c5-91ec607eeb16-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "29a6f30a-3263-48fe-90c5-91ec607eeb16" (UID: "29a6f30a-3263-48fe-90c5-91ec607eeb16"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 20:54:53 crc kubenswrapper[4701]: I1121 20:54:53.179813 4701 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29a6f30a-3263-48fe-90c5-91ec607eeb16-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 21 20:54:53 crc kubenswrapper[4701]: I1121 20:54:53.179846 4701 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rfsxk\" (UniqueName: \"kubernetes.io/projected/29a6f30a-3263-48fe-90c5-91ec607eeb16-kube-api-access-rfsxk\") on node \"crc\" DevicePath \"\""
Nov 21 20:54:53 crc kubenswrapper[4701]: I1121 20:54:53.883150 4701 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5vz7w"
Nov 21 20:54:53 crc kubenswrapper[4701]: I1121 20:54:53.947188 4701 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5vz7w"]
Nov 21 20:54:53 crc kubenswrapper[4701]: I1121 20:54:53.968782 4701 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-5vz7w"]
Nov 21 20:54:55 crc kubenswrapper[4701]: I1121 20:54:55.970769 4701 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29a6f30a-3263-48fe-90c5-91ec607eeb16" path="/var/lib/kubelet/pods/29a6f30a-3263-48fe-90c5-91ec607eeb16/volumes"
Nov 21 20:54:57 crc kubenswrapper[4701]: I1121 20:54:57.951875 4701 scope.go:117] "RemoveContainer" containerID="3d797f0000302550ad2e1909fd94b82c08f3845841d76d73d89e762960062c62"
Nov 21 20:54:57 crc kubenswrapper[4701]: E1121 20:54:57.952951 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614"
Nov 21 20:55:11 crc kubenswrapper[4701]: I1121 20:55:11.952790 4701 scope.go:117] "RemoveContainer" containerID="3d797f0000302550ad2e1909fd94b82c08f3845841d76d73d89e762960062c62"
Nov 21 20:55:11 crc kubenswrapper[4701]: E1121 20:55:11.954080 4701 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-tbszf_openshift-machine-config-operator(e70a068b-c06b-4ffe-8496-6f55c321d614)\"" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614"
Nov 21 20:55:23 crc kubenswrapper[4701]: I1121 20:55:23.952373 4701 scope.go:117] "RemoveContainer" containerID="3d797f0000302550ad2e1909fd94b82c08f3845841d76d73d89e762960062c62"
Nov 21 20:55:24 crc kubenswrapper[4701]: I1121 20:55:24.312734 4701 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" event={"ID":"e70a068b-c06b-4ffe-8496-6f55c321d614","Type":"ContainerStarted","Data":"958ef6418802da732d7be39f5158e5762bee7d652bcffc4d065cd38ed0e8a02d"}
Nov 21 20:57:48 crc kubenswrapper[4701]: I1121 20:57:48.614050 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 21 20:57:48 crc kubenswrapper[4701]: I1121 20:57:48.615395 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 21 20:58:18 crc kubenswrapper[4701]: I1121 20:58:18.613767 4701 patch_prober.go:28] interesting pod/machine-config-daemon-tbszf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 20:58:18 crc kubenswrapper[4701]: I1121 20:58:18.614841 4701 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-tbszf" podUID="e70a068b-c06b-4ffe-8496-6f55c321d614" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515110151212024432 0ustar coreroot‹íÁ  ÷Om7 €7šÞ'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015110151212017347 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015110133067016502 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015110133067015452 5ustar corecore